problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_17387 | rasdani/github-patches | git_diff | saleor__saleor-13132 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Gift card filter by `createdByEmail`
## Problem
Customer support needs to find customers' gift cards to be able to resend a code, correct a balance, or inquire about a balance
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/giftcard/filters.py`
Content:
```
1 from typing import List
2 from uuid import UUID
3
4 import django_filters
5 import graphene
6 from django.db.models import Exists, OuterRef, Q
7 from graphql.error import GraphQLError
8
9 from ...account import models as account_models
10 from ...giftcard import models
11 from ...order import models as order_models
12 from ...product import models as product_models
13 from ..core.doc_category import DOC_CATEGORY_GIFT_CARDS
14 from ..core.filters import (
15 GlobalIDMultipleChoiceFilter,
16 ListObjectTypeFilter,
17 MetadataFilterBase,
18 ObjectTypeFilter,
19 )
20 from ..core.types import (
21 BaseInputObjectType,
22 FilterInputObjectType,
23 NonNullList,
24 PriceRangeInput,
25 )
26 from ..utils import resolve_global_ids_to_primary_keys
27 from .enums import GiftCardEventsEnum
28
29
30 def filter_products(qs, _, value):
31 if value:
32 _, product_pks = resolve_global_ids_to_primary_keys(value, "Product")
33 qs = filter_gift_cards_by_products(qs, product_pks)
34 return qs
35
36
37 def filter_gift_cards_by_products(qs, product_ids):
38 products = product_models.Product.objects.filter(pk__in=product_ids)
39 return qs.filter(Exists(products.filter(pk=OuterRef("product_id"))))
40
41
42 def filter_used_by(qs, _, value):
43 if value:
44 _, user_pks = resolve_global_ids_to_primary_keys(value, "User")
45 qs = filter_gift_cards_by_used_by_user(qs, user_pks)
46 return qs
47
48
49 def filter_gift_cards_by_used_by_user(qs, user_pks):
50 users = account_models.User.objects.filter(pk__in=user_pks)
51 return qs.filter(Exists(users.filter(pk=OuterRef("used_by_id"))))
52
53
54 def filter_tags_list(qs, _, value):
55 if not value:
56 return qs
57 tags = models.GiftCardTag.objects.filter(name__in=value)
58 return qs.filter(Exists(tags.filter(pk=OuterRef("tags__id"))))
59
60
61 def filter_gift_card_used(qs, _, value):
62 if value is None:
63 return qs
64 return qs.filter(used_by_email__isnull=not value)
65
66
67 def filter_currency(qs, _, value):
68 if not value:
69 return qs
70 return qs.filter(currency=value)
71
72
73 def _filter_by_price(qs, field, value):
74 lookup = {}
75 if lte := value.get("lte"):
76 lookup[f"{field}_amount__lte"] = lte
77 if gte := value.get("gte"):
78 lookup[f"{field}_amount__gte"] = gte
79 return qs.filter(**lookup)
80
81
82 def filter_code(qs, _, value):
83 if not value:
84 return qs
85 return qs.filter(code=value)
86
87
88 class GiftCardFilter(MetadataFilterBase):
89 tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)
90 products = GlobalIDMultipleChoiceFilter(method=filter_products)
91 used_by = GlobalIDMultipleChoiceFilter(method=filter_used_by)
92 used = django_filters.BooleanFilter(method=filter_gift_card_used)
93 currency = django_filters.CharFilter(method=filter_currency)
94 current_balance = ObjectTypeFilter(
95 input_class=PriceRangeInput, method="filter_current_balance"
96 )
97 initial_balance = ObjectTypeFilter(
98 input_class=PriceRangeInput, method="filter_initial_balance"
99 )
100 is_active = django_filters.BooleanFilter()
101 code = django_filters.CharFilter(method=filter_code)
102
103 class Meta:
104 model = models.GiftCard
105 fields = ["is_active"]
106
107 def filter_current_balance(self, queryset, name, value):
108 check_currency_in_filter_data(self.data)
109 return _filter_by_price(queryset, "current_balance", value)
110
111 def filter_initial_balance(self, queryset, name, value):
112 check_currency_in_filter_data(self.data)
113 return _filter_by_price(queryset, "initial_balance", value)
114
115
116 def check_currency_in_filter_data(filter_data: dict):
117 currency = filter_data.get("currency")
118 if not currency:
119 raise GraphQLError(
120 "You must provide a `currency` filter parameter for filtering by price."
121 )
122
123
124 class GiftCardFilterInput(FilterInputObjectType):
125 class Meta:
126 doc_category = DOC_CATEGORY_GIFT_CARDS
127 filterset_class = GiftCardFilter
128
129
130 def filter_events_by_type(events: List[models.GiftCardEvent], type_value: str):
131 filtered_events = []
132 for event in events:
133 if event.type == type_value:
134 filtered_events.append(event)
135 return filtered_events
136
137
138 def filter_events_by_orders(events: List[models.GiftCardEvent], order_ids: List[str]):
139 order_pks = _get_order_pks(order_ids)
140
141 filtered_events = []
142 for event in events:
143 if event.order_id in order_pks:
144 filtered_events.append(event)
145 return filtered_events
146
147
148 def _get_order_pks(order_ids: List[str]):
149 _, order_pks = resolve_global_ids_to_primary_keys(order_ids, "Order")
150
151 pks = []
152 old_pks = []
153 for pk in order_pks:
154 try:
155 pks.append(UUID(pk))
156 except ValueError:
157 old_pks.append(pk)
158
159 return order_models.Order.objects.filter(
160 Q(id__in=pks) | (Q(use_old_id=True) & Q(number__in=old_pks))
161 ).values_list("id", flat=True)
162
163
164 class GiftCardEventFilterInput(BaseInputObjectType):
165 type = graphene.Argument(GiftCardEventsEnum)
166 orders = NonNullList(graphene.ID)
167
168 class Meta:
169 doc_category = DOC_CATEGORY_GIFT_CARDS
170
171
172 def filter_gift_card_tag_search(qs, _, value):
173 if not value:
174 return qs
175 return qs.filter(name__ilike=value)
176
177
178 class GiftCardTagFilter(django_filters.FilterSet):
179 search = django_filters.CharFilter(method=filter_gift_card_tag_search)
180
181
182 class GiftCardTagFilterInput(FilterInputObjectType):
183 class Meta:
184 doc_category = DOC_CATEGORY_GIFT_CARDS
185 filterset_class = GiftCardTagFilter
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/graphql/giftcard/filters.py b/saleor/graphql/giftcard/filters.py
--- a/saleor/graphql/giftcard/filters.py
+++ b/saleor/graphql/giftcard/filters.py
@@ -85,6 +85,12 @@
return qs.filter(code=value)
+def filter_created_by_email(qs, _, value):
+ if not value:
+ return qs
+ return qs.filter(created_by_email=value)
+
+
class GiftCardFilter(MetadataFilterBase):
tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)
products = GlobalIDMultipleChoiceFilter(method=filter_products)
@@ -99,6 +105,7 @@
)
is_active = django_filters.BooleanFilter()
code = django_filters.CharFilter(method=filter_code)
+ created_by_email = django_filters.CharFilter(method=filter_created_by_email)
class Meta:
model = models.GiftCard
| {"golden_diff": "diff --git a/saleor/graphql/giftcard/filters.py b/saleor/graphql/giftcard/filters.py\n--- a/saleor/graphql/giftcard/filters.py\n+++ b/saleor/graphql/giftcard/filters.py\n@@ -85,6 +85,12 @@\n return qs.filter(code=value)\n \n \n+def filter_created_by_email(qs, _, value):\n+ if not value:\n+ return qs\n+ return qs.filter(created_by_email=value)\n+\n+\n class GiftCardFilter(MetadataFilterBase):\n tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)\n products = GlobalIDMultipleChoiceFilter(method=filter_products)\n@@ -99,6 +105,7 @@\n )\n is_active = django_filters.BooleanFilter()\n code = django_filters.CharFilter(method=filter_code)\n+ created_by_email = django_filters.CharFilter(method=filter_created_by_email)\n \n class Meta:\n model = models.GiftCard\n", "issue": "Gift card filter by `createdByEmail`\n## Problem\r\n\r\nCustomer support needs to find customers giftcards to be able to resend code, correct balance or inquiry about balance\r\n\n", "before_files": [{"content": "from typing import List\nfrom uuid import UUID\n\nimport django_filters\nimport graphene\nfrom django.db.models import Exists, OuterRef, Q\nfrom graphql.error import GraphQLError\n\nfrom ...account import models as account_models\nfrom ...giftcard import models\nfrom ...order import models as order_models\nfrom ...product import models as product_models\nfrom ..core.doc_category import DOC_CATEGORY_GIFT_CARDS\nfrom ..core.filters import (\n GlobalIDMultipleChoiceFilter,\n ListObjectTypeFilter,\n MetadataFilterBase,\n ObjectTypeFilter,\n)\nfrom ..core.types import (\n BaseInputObjectType,\n FilterInputObjectType,\n NonNullList,\n PriceRangeInput,\n)\nfrom ..utils import resolve_global_ids_to_primary_keys\nfrom .enums import GiftCardEventsEnum\n\n\ndef filter_products(qs, _, value):\n if value:\n _, product_pks = resolve_global_ids_to_primary_keys(value, \"Product\")\n qs = filter_gift_cards_by_products(qs, product_pks)\n return qs\n\n\ndef filter_gift_cards_by_products(qs, product_ids):\n products = product_models.Product.objects.filter(pk__in=product_ids)\n return qs.filter(Exists(products.filter(pk=OuterRef(\"product_id\"))))\n\n\ndef filter_used_by(qs, _, value):\n if value:\n _, user_pks = resolve_global_ids_to_primary_keys(value, \"User\")\n qs = filter_gift_cards_by_used_by_user(qs, user_pks)\n return qs\n\n\ndef filter_gift_cards_by_used_by_user(qs, user_pks):\n users = account_models.User.objects.filter(pk__in=user_pks)\n return qs.filter(Exists(users.filter(pk=OuterRef(\"used_by_id\"))))\n\n\ndef filter_tags_list(qs, _, value):\n if not value:\n return qs\n tags = models.GiftCardTag.objects.filter(name__in=value)\n return qs.filter(Exists(tags.filter(pk=OuterRef(\"tags__id\"))))\n\n\ndef filter_gift_card_used(qs, _, value):\n if value is None:\n return qs\n return qs.filter(used_by_email__isnull=not value)\n\n\ndef filter_currency(qs, _, value):\n if not value:\n return qs\n return qs.filter(currency=value)\n\n\ndef _filter_by_price(qs, field, value):\n lookup = {}\n if lte := value.get(\"lte\"):\n lookup[f\"{field}_amount__lte\"] = lte\n if gte := value.get(\"gte\"):\n lookup[f\"{field}_amount__gte\"] = gte\n return qs.filter(**lookup)\n\n\ndef filter_code(qs, _, value):\n if not value:\n return qs\n return qs.filter(code=value)\n\n\nclass GiftCardFilter(MetadataFilterBase):\n tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)\n products = GlobalIDMultipleChoiceFilter(method=filter_products)\n used_by = 
GlobalIDMultipleChoiceFilter(method=filter_used_by)\n used = django_filters.BooleanFilter(method=filter_gift_card_used)\n currency = django_filters.CharFilter(method=filter_currency)\n current_balance = ObjectTypeFilter(\n input_class=PriceRangeInput, method=\"filter_current_balance\"\n )\n initial_balance = ObjectTypeFilter(\n input_class=PriceRangeInput, method=\"filter_initial_balance\"\n )\n is_active = django_filters.BooleanFilter()\n code = django_filters.CharFilter(method=filter_code)\n\n class Meta:\n model = models.GiftCard\n fields = [\"is_active\"]\n\n def filter_current_balance(self, queryset, name, value):\n check_currency_in_filter_data(self.data)\n return _filter_by_price(queryset, \"current_balance\", value)\n\n def filter_initial_balance(self, queryset, name, value):\n check_currency_in_filter_data(self.data)\n return _filter_by_price(queryset, \"initial_balance\", value)\n\n\ndef check_currency_in_filter_data(filter_data: dict):\n currency = filter_data.get(\"currency\")\n if not currency:\n raise GraphQLError(\n \"You must provide a `currency` filter parameter for filtering by price.\"\n )\n\n\nclass GiftCardFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n filterset_class = GiftCardFilter\n\n\ndef filter_events_by_type(events: List[models.GiftCardEvent], type_value: str):\n filtered_events = []\n for event in events:\n if event.type == type_value:\n filtered_events.append(event)\n return filtered_events\n\n\ndef filter_events_by_orders(events: List[models.GiftCardEvent], order_ids: List[str]):\n order_pks = _get_order_pks(order_ids)\n\n filtered_events = []\n for event in events:\n if event.order_id in order_pks:\n filtered_events.append(event)\n return filtered_events\n\n\ndef _get_order_pks(order_ids: List[str]):\n _, order_pks = resolve_global_ids_to_primary_keys(order_ids, \"Order\")\n\n pks = []\n old_pks = []\n for pk in order_pks:\n try:\n pks.append(UUID(pk))\n except ValueError:\n old_pks.append(pk)\n\n return order_models.Order.objects.filter(\n Q(id__in=pks) | (Q(use_old_id=True) & Q(number__in=old_pks))\n ).values_list(\"id\", flat=True)\n\n\nclass GiftCardEventFilterInput(BaseInputObjectType):\n type = graphene.Argument(GiftCardEventsEnum)\n orders = NonNullList(graphene.ID)\n\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n\n\ndef filter_gift_card_tag_search(qs, _, value):\n if not value:\n return qs\n return qs.filter(name__ilike=value)\n\n\nclass GiftCardTagFilter(django_filters.FilterSet):\n search = django_filters.CharFilter(method=filter_gift_card_tag_search)\n\n\nclass GiftCardTagFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n filterset_class = GiftCardTagFilter\n", "path": "saleor/graphql/giftcard/filters.py"}], "after_files": [{"content": "from typing import List\nfrom uuid import UUID\n\nimport django_filters\nimport graphene\nfrom django.db.models import Exists, OuterRef, Q\nfrom graphql.error import GraphQLError\n\nfrom ...account import models as account_models\nfrom ...giftcard import models\nfrom ...order import models as order_models\nfrom ...product import models as product_models\nfrom ..core.doc_category import DOC_CATEGORY_GIFT_CARDS\nfrom ..core.filters import (\n GlobalIDMultipleChoiceFilter,\n ListObjectTypeFilter,\n MetadataFilterBase,\n ObjectTypeFilter,\n)\nfrom ..core.types import (\n BaseInputObjectType,\n FilterInputObjectType,\n NonNullList,\n PriceRangeInput,\n)\nfrom ..utils import resolve_global_ids_to_primary_keys\nfrom .enums 
import GiftCardEventsEnum\n\n\ndef filter_products(qs, _, value):\n if value:\n _, product_pks = resolve_global_ids_to_primary_keys(value, \"Product\")\n qs = filter_gift_cards_by_products(qs, product_pks)\n return qs\n\n\ndef filter_gift_cards_by_products(qs, product_ids):\n products = product_models.Product.objects.filter(pk__in=product_ids)\n return qs.filter(Exists(products.filter(pk=OuterRef(\"product_id\"))))\n\n\ndef filter_used_by(qs, _, value):\n if value:\n _, user_pks = resolve_global_ids_to_primary_keys(value, \"User\")\n qs = filter_gift_cards_by_used_by_user(qs, user_pks)\n return qs\n\n\ndef filter_gift_cards_by_used_by_user(qs, user_pks):\n users = account_models.User.objects.filter(pk__in=user_pks)\n return qs.filter(Exists(users.filter(pk=OuterRef(\"used_by_id\"))))\n\n\ndef filter_tags_list(qs, _, value):\n if not value:\n return qs\n tags = models.GiftCardTag.objects.filter(name__in=value)\n return qs.filter(Exists(tags.filter(pk=OuterRef(\"tags__id\"))))\n\n\ndef filter_gift_card_used(qs, _, value):\n if value is None:\n return qs\n return qs.filter(used_by_email__isnull=not value)\n\n\ndef filter_currency(qs, _, value):\n if not value:\n return qs\n return qs.filter(currency=value)\n\n\ndef _filter_by_price(qs, field, value):\n lookup = {}\n if lte := value.get(\"lte\"):\n lookup[f\"{field}_amount__lte\"] = lte\n if gte := value.get(\"gte\"):\n lookup[f\"{field}_amount__gte\"] = gte\n return qs.filter(**lookup)\n\n\ndef filter_code(qs, _, value):\n if not value:\n return qs\n return qs.filter(code=value)\n\n\ndef filter_created_by_email(qs, _, value):\n if not value:\n return qs\n return qs.filter(created_by_email=value)\n\n\nclass GiftCardFilter(MetadataFilterBase):\n tags = ListObjectTypeFilter(input_class=graphene.String, method=filter_tags_list)\n products = GlobalIDMultipleChoiceFilter(method=filter_products)\n used_by = GlobalIDMultipleChoiceFilter(method=filter_used_by)\n used = django_filters.BooleanFilter(method=filter_gift_card_used)\n currency = django_filters.CharFilter(method=filter_currency)\n current_balance = ObjectTypeFilter(\n input_class=PriceRangeInput, method=\"filter_current_balance\"\n )\n initial_balance = ObjectTypeFilter(\n input_class=PriceRangeInput, method=\"filter_initial_balance\"\n )\n is_active = django_filters.BooleanFilter()\n code = django_filters.CharFilter(method=filter_code)\n created_by_email = django_filters.CharFilter(method=filter_created_by_email)\n\n class Meta:\n model = models.GiftCard\n fields = [\"is_active\"]\n\n def filter_current_balance(self, queryset, name, value):\n check_currency_in_filter_data(self.data)\n return _filter_by_price(queryset, \"current_balance\", value)\n\n def filter_initial_balance(self, queryset, name, value):\n check_currency_in_filter_data(self.data)\n return _filter_by_price(queryset, \"initial_balance\", value)\n\n\ndef check_currency_in_filter_data(filter_data: dict):\n currency = filter_data.get(\"currency\")\n if not currency:\n raise GraphQLError(\n \"You must provide a `currency` filter parameter for filtering by price.\"\n )\n\n\nclass GiftCardFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n filterset_class = GiftCardFilter\n\n\ndef filter_events_by_type(events: List[models.GiftCardEvent], type_value: str):\n filtered_events = []\n for event in events:\n if event.type == type_value:\n filtered_events.append(event)\n return filtered_events\n\n\ndef filter_events_by_orders(events: List[models.GiftCardEvent], order_ids: List[str]):\n order_pks = 
_get_order_pks(order_ids)\n\n filtered_events = []\n for event in events:\n if event.order_id in order_pks:\n filtered_events.append(event)\n return filtered_events\n\n\ndef _get_order_pks(order_ids: List[str]):\n _, order_pks = resolve_global_ids_to_primary_keys(order_ids, \"Order\")\n\n pks = []\n old_pks = []\n for pk in order_pks:\n try:\n pks.append(UUID(pk))\n except ValueError:\n old_pks.append(pk)\n\n return order_models.Order.objects.filter(\n Q(id__in=pks) | (Q(use_old_id=True) & Q(number__in=old_pks))\n ).values_list(\"id\", flat=True)\n\n\nclass GiftCardEventFilterInput(BaseInputObjectType):\n type = graphene.Argument(GiftCardEventsEnum)\n orders = NonNullList(graphene.ID)\n\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n\n\ndef filter_gift_card_tag_search(qs, _, value):\n if not value:\n return qs\n return qs.filter(name__ilike=value)\n\n\nclass GiftCardTagFilter(django_filters.FilterSet):\n search = django_filters.CharFilter(method=filter_gift_card_tag_search)\n\n\nclass GiftCardTagFilterInput(FilterInputObjectType):\n class Meta:\n doc_category = DOC_CATEGORY_GIFT_CARDS\n filterset_class = GiftCardTagFilter\n", "path": "saleor/graphql/giftcard/filters.py"}]} | 2,044 | 215 |
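The golden diff for this row adds a `created_by_email` filter that mirrors the existing helper functions in `filters.py`. A minimal, framework-free sketch of that filter pattern is shown below; `GiftCard` and the in-memory list are stand-ins for illustration, not Saleor's real Django models or querysets.

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class GiftCard:
    code: str
    created_by_email: Optional[str]


def filter_created_by_email(qs: List[GiftCard], _name: str, value: Optional[str]) -> List[GiftCard]:
    # Same shape as the other filter helpers: pass the list through when the
    # filter value is empty, otherwise keep only exact matches on created_by_email.
    if not value:
        return qs
    return [card for card in qs if card.created_by_email == value]


if __name__ == "__main__":
    cards = [
        GiftCard(code="CARD-1", created_by_email="support@example.com"),
        GiftCard(code="CARD-2", created_by_email=None),
    ]
    print(filter_created_by_email(cards, "created_by_email", "support@example.com"))
```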
gh_patches_debug_8721 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError "attname" or "nspname" from various endpoints
I have been getting this API error intermittently when making a GET request to the records and columns endpoints. It always happens after making some DDL request, like importing a table, renaming a column, or extracting columns -- though I can't reproduce it 100% of the time.
<details>
<summary>(A) Sometimes I get this "nspname" error</summary>
```
Environment:
Request Method: GET
Request URL: http://localhost:8000/api/db/v0/tables/1894/records/?limit=500&offset=0
Django Version: 3.1.14
Python Version: 3.9.16
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'django_property_filter',
'mathesar']
Installed Middleware:
['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'mathesar.middleware.CursorClosedHandlerMiddleware',
'mathesar.middleware.PasswordChangeNeededMiddleware',
'django_userforeignkey.middleware.UserForeignKeyMiddleware',
'django_request_cache.middleware.RequestCacheMiddleware']
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py", line 1167, in __getattr__
return self._index[key]
The above exception ('nspname') was the direct cause of the following exception:
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py", line 125, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 466, in handle_exception
response = exception_handler(exc, context)
File "/code/mathesar/exception_handlers.py", line 59, in mathesar_exception_handler
raise exc
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "/code/mathesar/api/db/viewsets/records.py", line 72, in list
records = paginator.paginate_queryset(
File "/code/mathesar/api/pagination.py", line 81, in paginate_queryset
self.count = table.sa_num_records(filter=filters, search=search)
File "/code/mathesar/models/base.py", line 471, in sa_num_records
table=self._sa_table,
File "/code/mathesar/state/cached_property.py", line 62, in __get__
new_value = self.original_get_fn(instance)
File "/code/mathesar/models/base.py", line 337, in _sa_table
sa_table = reflect_table_from_oid(
File "/code/db/tables/operations/select.py", line 31, in reflect_table_from_oid
tables = reflect_tables_from_oids(
File "/code/db/tables/operations/select.py", line 43, in reflect_tables_from_oids
get_map_of_table_oid_to_schema_name_and_table_name(
File "/code/db/tables/operations/select.py", line 74, in get_map_of_table_oid_to_schema_name_and_table_name
select(pg_namespace.c.nspname, pg_class.c.relname, pg_class.c.oid)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py", line 1169, in __getattr__
util.raise_(AttributeError(key), replace_context=err)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
Exception Type: AttributeError at /api/db/v0/tables/1894/records/
Exception Value: nspname
```
</details>
<details>
<summary>(B) Other times I get this "attname" error</summary>
```
Environment:
Request Method: GET
Request URL: http://localhost:8000/api/db/v0/tables/1824/records/?limit=500&offset=0
Django Version: 3.1.14
Python Version: 3.9.16
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'django_property_filter',
'mathesar']
Installed Middleware:
['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'mathesar.middleware.CursorClosedHandlerMiddleware',
'mathesar.middleware.PasswordChangeNeededMiddleware',
'django_userforeignkey.middleware.UserForeignKeyMiddleware',
'django_request_cache.middleware.RequestCacheMiddleware']
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py", line 1167, in __getattr__
return self._index[key]
The above exception ('attname') was the direct cause of the following exception:
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py", line 125, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 466, in handle_exception
response = exception_handler(exc, context)
File "/code/mathesar/exception_handlers.py", line 59, in mathesar_exception_handler
raise exc
File "/usr/local/lib/python3.9/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "/code/mathesar/api/db/viewsets/records.py", line 55, in list
column_names_to_ids = table.get_column_name_id_bidirectional_map()
File "/code/mathesar/models/base.py", line 554, in get_column_name_id_bidirectional_map
columns_map = bidict({column.name: column.id for column in columns})
File "/usr/local/lib/python3.9/site-packages/django/db/models/query.py", line 287, in __iter__
self._fetch_all()
File "/code/mathesar/utils/prefetch.py", line 158, in _fetch_all
prefetcher.fetch(obj_list, name, self.model, forwarders)
File "/code/mathesar/utils/prefetch.py", line 270, in fetch
related_data = self.filter(data_mapping.keys(), data_mapping.values())
File "/code/mathesar/models/base.py", line 225, in filter
return get_map_of_attnum_to_column_name(
File "/code/db/columns/operations/select.py", line 91, in get_map_of_attnum_to_column_name
triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
File "/code/db/columns/operations/select.py", line 104, in _get_triples_of_column_name_and_attnum_and_table_oid
statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
File "/code/db/columns/operations/select.py", line 180, in _statement_for_triples_of_column_name_and_attnum_and_table_oid
sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py", line 1169, in __getattr__
util.raise_(AttributeError(key), replace_context=err)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
Exception Type: AttributeError at /api/db/v0/tables/1824/records/
Exception Value: attname
```
</details>
The exact same steps will sometimes produce (A), other times produce (B), and other times no error at all. I hit these errors a lot while working on #2140. I was doing a workflow where I'd import `books_sim.ts`, then extract the author's first and last names into a new table. I did it over and over again. I probably hit the error about 75% of the time during import and 60% of the time during column extraction.
https://user-images.githubusercontent.com/42411/208746906-86d06e41-18d0-45da-8d39-32aa8fddecba.mp4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/utils.py`
Content:
```
1 import inspect
2 import warnings
3
4 from psycopg2.errors import UndefinedFunction
5
6 import sqlalchemy
7 from sqlalchemy.exc import ProgrammingError
8
9 from db.records import exceptions
10
11
12 def execute_statement(engine, statement, connection_to_use=None):
13 try:
14 if connection_to_use:
15 return connection_to_use.execute(statement)
16 else:
17 with engine.begin() as conn:
18 return conn.execute(statement)
19 except ProgrammingError as e:
20 if isinstance(e.orig, UndefinedFunction):
21 message = e.orig.args[0].split('\n')[0]
22 raise exceptions.UndefinedFunction(message)
23 else:
24 raise e
25
26
27 def execute_pg_query(engine, query, connection_to_use=None):
28 if isinstance(query, sqlalchemy.sql.expression.Executable):
29 executable = query
30 else:
31 executable = sqlalchemy.select(query)
32 return execute_statement(engine, executable, connection_to_use=connection_to_use).fetchall()
33
34
35 # TODO refactor to use @functools.total_ordering
36 class OrderByIds:
37 """
38 A mixin for ordering based on ids; useful at least for type enums in testing.
39 """
40
41 id: str # noqa: NT001
42
43 def __ge__(self, other):
44 if self._ordering_supported(other):
45 return self.id >= other.id
46 return NotImplemented
47
48 def __gt__(self, other):
49 if self._ordering_supported(other):
50 return self.id > other.id
51 return NotImplemented
52
53 def __le__(self, other):
54 if self._ordering_supported(other):
55 return self.id <= other.id
56 return NotImplemented
57
58 def __lt__(self, other):
59 if self._ordering_supported(other):
60 return self.id < other.id
61 return NotImplemented
62
63 def _ordering_supported(self, other):
64 return hasattr(other, 'id')
65
66
67 def get_module_members_that_satisfy(module, predicate):
68 """
69 Looks at the members of the provided module and filters them using the provided predicate.
70
71 Currently used to automatically collect all concrete subclasses of some abstract superclass
72 found as top-level members of a module.
73 """
74 all_members_in_defining_module = inspect.getmembers(module)
75 return set(
76 member
77 for _, member in all_members_in_defining_module
78 if predicate(member)
79 )
80
81
82 def ignore_type_warning(f):
83 """
84 When loading PostgreSQL system tables, an SAWarning is often generated
85 since they use some exotic, postgres-specific types.
86
87 This decorator allows one to ignore those warnings.
88 """
89 def warning_ignored_func(*args, **kwargs):
90 with warnings.catch_warnings():
91 warnings.filterwarnings("ignore", message="Did not recognize type")
92 return f(*args, **kwargs)
93
94 return warning_ignored_func
95
96
97 @ignore_type_warning
98 def get_pg_catalog_table(table_name, engine, metadata):
99 return sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')
100
101
102 def ignore_duplicate_wrapper(stmt):
103 return f"""
104 DO $$ BEGIN
105 {stmt}
106 EXCEPTION
107 WHEN duplicate_object THEN null;
108 END $$;
109 """
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/utils.py b/db/utils.py
--- a/db/utils.py
+++ b/db/utils.py
@@ -96,7 +96,11 @@
@ignore_type_warning
def get_pg_catalog_table(table_name, engine, metadata):
- return sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')
+ table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')
+ # Refresh metadata if it hasn't reflected correctly. Refer https://github.com/centerofci/mathesar/issues/2138
+ if len(table.c) < 1:
+ table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog', extend_existing=True)
+ return table
def ignore_duplicate_wrapper(stmt):
| {"golden_diff": "diff --git a/db/utils.py b/db/utils.py\n--- a/db/utils.py\n+++ b/db/utils.py\n@@ -96,7 +96,11 @@\n \n @ignore_type_warning\n def get_pg_catalog_table(table_name, engine, metadata):\n- return sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')\n+ table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')\n+ # Refresh metadata if it hasn't reflected correctly. Refer https://github.com/centerofci/mathesar/issues/2138\n+ if len(table.c) < 1:\n+ table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog', extend_existing=True)\n+ return table\n \n \n def ignore_duplicate_wrapper(stmt):\n", "issue": "AttributeError \"attname\" or \"nspname\" from various endpoints\nI have been getting this API error intermittently when making a GET request to the records and columns endpoints. It always happens after making some DDL request, like importing a table, or renaming a columns, or extracting columns -- though I can't reproduce it 100% of the time.\r\n\r\n<details>\r\n<summary>(A) Sometimes I get this \"nspname\" error</summary>\r\n\r\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/api/db/v0/tables/1894/records/?limit=500&offset=0\r\n\r\nDjango Version: 3.1.14\r\nPython Version: 3.9.16\r\nInstalled Applications:\r\n['django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'rest_framework',\r\n 'django_filters',\r\n 'django_property_filter',\r\n 'mathesar']\r\nInstalled Middleware:\r\n['django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'mathesar.middleware.CursorClosedHandlerMiddleware',\r\n 'mathesar.middleware.PasswordChangeNeededMiddleware',\r\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\r\n 'django_request_cache.middleware.RequestCacheMiddleware']\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py\", line 1167, in __getattr__\r\n return self._index[key]\r\n\r\nThe above exception ('nspname') was the direct cause of the following exception:\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py\", line 125, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 466, in handle_exception\r\n response = exception_handler(exc, context)\r\n File \"/code/mathesar/exception_handlers.py\", line 59, 
in mathesar_exception_handler\r\n raise exc\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 506, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"/code/mathesar/api/db/viewsets/records.py\", line 72, in list\r\n records = paginator.paginate_queryset(\r\n File \"/code/mathesar/api/pagination.py\", line 81, in paginate_queryset\r\n self.count = table.sa_num_records(filter=filters, search=search)\r\n File \"/code/mathesar/models/base.py\", line 471, in sa_num_records\r\n table=self._sa_table,\r\n File \"/code/mathesar/state/cached_property.py\", line 62, in __get__\r\n new_value = self.original_get_fn(instance)\r\n File \"/code/mathesar/models/base.py\", line 337, in _sa_table\r\n sa_table = reflect_table_from_oid(\r\n File \"/code/db/tables/operations/select.py\", line 31, in reflect_table_from_oid\r\n tables = reflect_tables_from_oids(\r\n File \"/code/db/tables/operations/select.py\", line 43, in reflect_tables_from_oids\r\n get_map_of_table_oid_to_schema_name_and_table_name(\r\n File \"/code/db/tables/operations/select.py\", line 74, in get_map_of_table_oid_to_schema_name_and_table_name\r\n select(pg_namespace.c.nspname, pg_class.c.relname, pg_class.c.oid)\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py\", line 1169, in __getattr__\r\n util.raise_(AttributeError(key), replace_context=err)\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py\", line 207, in raise_\r\n raise exception\r\n\r\nException Type: AttributeError at /api/db/v0/tables/1894/records/\r\nException Value: nspname\r\n```\r\n\r\n</details>\r\n\r\n<details>\r\n<summary>(B) Other times I get this \"attname\" error</summary>\r\n\r\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/api/db/v0/tables/1824/records/?limit=500&offset=0\r\n\r\nDjango Version: 3.1.14\r\nPython Version: 3.9.16\r\nInstalled Applications:\r\n['django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'rest_framework',\r\n 'django_filters',\r\n 'django_property_filter',\r\n 'mathesar']\r\nInstalled Middleware:\r\n['django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'mathesar.middleware.CursorClosedHandlerMiddleware',\r\n 'mathesar.middleware.PasswordChangeNeededMiddleware',\r\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\r\n 'django_request_cache.middleware.RequestCacheMiddleware']\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py\", line 1167, in __getattr__\r\n return self._index[key]\r\n\r\nThe above exception ('attname') was the direct cause of the following exception:\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File 
\"/usr/local/lib/python3.9/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/viewsets.py\", line 125, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 466, in handle_exception\r\n response = exception_handler(exc, context)\r\n File \"/code/mathesar/exception_handlers.py\", line 59, in mathesar_exception_handler\r\n raise exc\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/views.py\", line 506, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"/code/mathesar/api/db/viewsets/records.py\", line 55, in list\r\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\r\n File \"/code/mathesar/models/base.py\", line 554, in get_column_name_id_bidirectional_map\r\n columns_map = bidict({column.name: column.id for column in columns})\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/models/query.py\", line 287, in __iter__\r\n self._fetch_all()\r\n File \"/code/mathesar/utils/prefetch.py\", line 158, in _fetch_all\r\n prefetcher.fetch(obj_list, name, self.model, forwarders)\r\n File \"/code/mathesar/utils/prefetch.py\", line 270, in fetch\r\n related_data = self.filter(data_mapping.keys(), data_mapping.values())\r\n File \"/code/mathesar/models/base.py\", line 225, in filter\r\n return get_map_of_attnum_to_column_name(\r\n File \"/code/db/columns/operations/select.py\", line 91, in get_map_of_attnum_to_column_name\r\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\r\n File \"/code/db/columns/operations/select.py\", line 104, in _get_triples_of_column_name_and_attnum_and_table_oid\r\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\r\n File \"/code/db/columns/operations/select.py\", line 180, in _statement_for_triples_of_column_name_and_attnum_and_table_oid\r\n sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/base.py\", line 1169, in __getattr__\r\n util.raise_(AttributeError(key), replace_context=err)\r\n File \"/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py\", line 207, in raise_\r\n raise exception\r\n\r\nException Type: AttributeError at /api/db/v0/tables/1824/records/\r\nException Value: attname\r\n```\r\n\r\n</details>\r\n\r\nThe exact same steps will sometimes produce (A), other times produce (B), and other times no error at all. I hit these errors a lot while working on #2140. I was doing a workflow where I'd import `books_sim.ts`, then extract the author's first and last names into a new table. I did it over and over again. 
I probably hit the error about 75% of the time during import and 60% of the time during column extraction.\r\n\r\nhttps://user-images.githubusercontent.com/42411/208746906-86d06e41-18d0-45da-8d39-32aa8fddecba.mp4\r\n\r\n\n", "before_files": [{"content": "import inspect\nimport warnings\n\nfrom psycopg2.errors import UndefinedFunction\n\nimport sqlalchemy\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.records import exceptions\n\n\ndef execute_statement(engine, statement, connection_to_use=None):\n try:\n if connection_to_use:\n return connection_to_use.execute(statement)\n else:\n with engine.begin() as conn:\n return conn.execute(statement)\n except ProgrammingError as e:\n if isinstance(e.orig, UndefinedFunction):\n message = e.orig.args[0].split('\\n')[0]\n raise exceptions.UndefinedFunction(message)\n else:\n raise e\n\n\ndef execute_pg_query(engine, query, connection_to_use=None):\n if isinstance(query, sqlalchemy.sql.expression.Executable):\n executable = query\n else:\n executable = sqlalchemy.select(query)\n return execute_statement(engine, executable, connection_to_use=connection_to_use).fetchall()\n\n\n# TODO refactor to use @functools.total_ordering\nclass OrderByIds:\n \"\"\"\n A mixin for ordering based on ids; useful at least for type enums in testing.\n \"\"\"\n\n id: str # noqa: NT001\n\n def __ge__(self, other):\n if self._ordering_supported(other):\n return self.id >= other.id\n return NotImplemented\n\n def __gt__(self, other):\n if self._ordering_supported(other):\n return self.id > other.id\n return NotImplemented\n\n def __le__(self, other):\n if self._ordering_supported(other):\n return self.id <= other.id\n return NotImplemented\n\n def __lt__(self, other):\n if self._ordering_supported(other):\n return self.id < other.id\n return NotImplemented\n\n def _ordering_supported(self, other):\n return hasattr(other, 'id')\n\n\ndef get_module_members_that_satisfy(module, predicate):\n \"\"\"\n Looks at the members of the provided module and filters them using the provided predicate.\n\n Currently used to automatically collect all concrete subclasses of some abstract superclass\n found as top-level members of a module.\n \"\"\"\n all_members_in_defining_module = inspect.getmembers(module)\n return set(\n member\n for _, member in all_members_in_defining_module\n if predicate(member)\n )\n\n\ndef ignore_type_warning(f):\n \"\"\"\n When loading PostgreSQL system tables, an SAWarning is often generated\n since they use some exotic, postgres-specific types.\n\n This decorator allows one to ignore those warnings.\n \"\"\"\n def warning_ignored_func(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n return f(*args, **kwargs)\n\n return warning_ignored_func\n\n\n@ignore_type_warning\ndef get_pg_catalog_table(table_name, engine, metadata):\n return sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')\n\n\ndef ignore_duplicate_wrapper(stmt):\n return f\"\"\"\n DO $$ BEGIN\n {stmt}\n EXCEPTION\n WHEN duplicate_object THEN null;\n END $$;\n \"\"\"\n", "path": "db/utils.py"}], "after_files": [{"content": "import inspect\nimport warnings\n\nfrom psycopg2.errors import UndefinedFunction\n\nimport sqlalchemy\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.records import exceptions\n\n\ndef execute_statement(engine, statement, connection_to_use=None):\n try:\n if connection_to_use:\n return connection_to_use.execute(statement)\n else:\n with engine.begin() as conn:\n 
return conn.execute(statement)\n except ProgrammingError as e:\n if isinstance(e.orig, UndefinedFunction):\n message = e.orig.args[0].split('\\n')[0]\n raise exceptions.UndefinedFunction(message)\n else:\n raise e\n\n\ndef execute_pg_query(engine, query, connection_to_use=None):\n if isinstance(query, sqlalchemy.sql.expression.Executable):\n executable = query\n else:\n executable = sqlalchemy.select(query)\n return execute_statement(engine, executable, connection_to_use=connection_to_use).fetchall()\n\n\n# TODO refactor to use @functools.total_ordering\nclass OrderByIds:\n \"\"\"\n A mixin for ordering based on ids; useful at least for type enums in testing.\n \"\"\"\n\n id: str # noqa: NT001\n\n def __ge__(self, other):\n if self._ordering_supported(other):\n return self.id >= other.id\n return NotImplemented\n\n def __gt__(self, other):\n if self._ordering_supported(other):\n return self.id > other.id\n return NotImplemented\n\n def __le__(self, other):\n if self._ordering_supported(other):\n return self.id <= other.id\n return NotImplemented\n\n def __lt__(self, other):\n if self._ordering_supported(other):\n return self.id < other.id\n return NotImplemented\n\n def _ordering_supported(self, other):\n return hasattr(other, 'id')\n\n\ndef get_module_members_that_satisfy(module, predicate):\n \"\"\"\n Looks at the members of the provided module and filters them using the provided predicate.\n\n Currently used to automatically collect all concrete subclasses of some abstract superclass\n found as top-level members of a module.\n \"\"\"\n all_members_in_defining_module = inspect.getmembers(module)\n return set(\n member\n for _, member in all_members_in_defining_module\n if predicate(member)\n )\n\n\ndef ignore_type_warning(f):\n \"\"\"\n When loading PostgreSQL system tables, an SAWarning is often generated\n since they use some exotic, postgres-specific types.\n\n This decorator allows one to ignore those warnings.\n \"\"\"\n def warning_ignored_func(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n return f(*args, **kwargs)\n\n return warning_ignored_func\n\n\n@ignore_type_warning\ndef get_pg_catalog_table(table_name, engine, metadata):\n table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog')\n # Refresh metadata if it hasn't reflected correctly. Refer https://github.com/centerofci/mathesar/issues/2138\n if len(table.c) < 1:\n table = sqlalchemy.Table(table_name, metadata, autoload_with=engine, schema='pg_catalog', extend_existing=True)\n return table\n\n\ndef ignore_duplicate_wrapper(stmt):\n return f\"\"\"\n DO $$ BEGIN\n {stmt}\n EXCEPTION\n WHEN duplicate_object THEN null;\n END $$;\n \"\"\"\n", "path": "db/utils.py"}]} | 3,431 | 174 |
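The golden diff for this row retries catalog reflection with `extend_existing=True` whenever the previously registered `Table` object comes back without columns. A small sketch of that retry pattern, using an in-memory SQLite table as a stand-in for PostgreSQL's `pg_catalog` (the stale, column-less registration is simulated by hand), could look like this:

```python
import sqlalchemy


def get_reflected_table(table_name, engine, metadata):
    table = sqlalchemy.Table(table_name, metadata, autoload_with=engine)
    # If a stale, column-less Table was already registered on this MetaData,
    # SQLAlchemy hands it back unchanged; force a re-reflection in that case.
    if len(table.c) < 1:
        table = sqlalchemy.Table(
            table_name, metadata, autoload_with=engine, extend_existing=True
        )
    return table


if __name__ == "__main__":
    engine = sqlalchemy.create_engine("sqlite://")
    with engine.begin() as conn:
        conn.execute(sqlalchemy.text("CREATE TABLE widgets (id INTEGER PRIMARY KEY, name TEXT)"))

    metadata = sqlalchemy.MetaData()
    sqlalchemy.Table("widgets", metadata)  # simulate the stale, column-less registration
    print(get_reflected_table("widgets", engine, metadata).c.keys())
```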
gh_patches_debug_29854 | rasdani/github-patches | git_diff | litestar-org__litestar-1577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files
### Description
When using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.
I will provide a PR with a test.
### URL to code causing the issue
_No response_
### MCVE
_No response_
### Steps to reproduce
_No response_
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
1.51.10
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/response/file.py`
Content:
```
1 from __future__ import annotations
2
3 from email.utils import formatdate
4 from inspect import iscoroutine
5 from mimetypes import guess_type
6 from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Literal, cast
7 from urllib.parse import quote
8 from zlib import adler32
9
10 from litestar.constants import ONE_MEGABYTE
11 from litestar.exceptions import ImproperlyConfiguredException
12 from litestar.file_system import BaseLocalFileSystem, FileSystemAdapter
13 from litestar.response.streaming import StreamingResponse
14 from litestar.status_codes import HTTP_200_OK
15
16 __all__ = ("FileResponse", "async_file_iterator", "create_etag_for_file")
17
18
19 if TYPE_CHECKING:
20 from os import PathLike
21 from os import stat_result as stat_result_type
22
23 from anyio import Path
24
25 from litestar.background_tasks import BackgroundTask, BackgroundTasks
26 from litestar.datastructures.headers import ETag
27 from litestar.enums import MediaType
28 from litestar.types import HTTPResponseBodyEvent, PathType, Receive, ResponseCookies, Send
29 from litestar.types.file_types import FileInfo, FileSystemProtocol
30
31
32 async def async_file_iterator(
33 file_path: PathType, chunk_size: int, adapter: FileSystemAdapter
34 ) -> AsyncGenerator[bytes, None]:
35 """Return an async that asynchronously reads a file and yields its chunks.
36
37 Args:
38 file_path: A path to a file.
39 chunk_size: The chunk file to use.
40 adapter: File system adapter class.
41 adapter: File system adapter class.
42
43 Returns:
44 An async generator.
45 """
46 async with await adapter.open(file_path) as file:
47 while chunk := await file.read(chunk_size):
48 yield chunk
49
50
51 def create_etag_for_file(path: PathType, modified_time: float, file_size: int) -> str:
52 """Create an etag.
53
54 Notes:
55 - Function is derived from flask.
56
57 Returns:
58 An etag.
59 """
60 check = adler32(str(path).encode("utf-8")) & 0xFFFFFFFF
61 return f'"{modified_time}-{file_size}-{check}"'
62
63
64 class FileResponse(StreamingResponse):
65 """A response, streaming a file as response body."""
66
67 __slots__ = (
68 "chunk_size",
69 "content_disposition_type",
70 "etag",
71 "file_path",
72 "filename",
73 "adapter",
74 "file_info",
75 )
76
77 def __init__(
78 self,
79 path: str | PathLike | Path,
80 *,
81 background: BackgroundTask | BackgroundTasks | None = None,
82 chunk_size: int = ONE_MEGABYTE,
83 content_disposition_type: Literal["attachment", "inline"] = "attachment",
84 cookies: ResponseCookies | None = None,
85 encoding: str = "utf-8",
86 etag: ETag | None = None,
87 file_system: FileSystemProtocol | None = None,
88 filename: str | None = None,
89 file_info: FileInfo | None = None,
90 headers: dict[str, Any] | None = None,
91 is_head_response: bool = False,
92 media_type: Literal[MediaType.TEXT] | str | None = None,
93 stat_result: stat_result_type | None = None,
94 status_code: int = HTTP_200_OK,
95 ) -> None:
96 """Initialize ``FileResponse``
97
98 Notes:
99 - This class extends the :class:`StreamingResponse <.response.StreamingResponse>` class.
100
101 Args:
102 path: A file path in one of the supported formats.
103 status_code: An HTTP status code.
104 media_type: A value for the response ``Content-Type`` header. If not provided, the value will be either
105 derived from the filename if provided and supported by the stdlib, or will default to
106 ``application/octet-stream``.
107 background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
108 :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.
109 Defaults to None.
110 headers: A string keyed dictionary of response headers. Header keys are insensitive.
111 cookies: A list of :class:`Cookie <.datastructures.Cookie>` instances to be set under the response
112 ``Set-Cookie`` header.
113 encoding: The encoding to be used for the response headers.
114 is_head_response: Whether the response should send only the headers ("head" request) or also the content.
115 filename: An optional filename to set in the header.
116 stat_result: An optional result of calling :func:os.stat:. If not provided, this will be done by the
117 response constructor.
118 chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.
119 content_disposition_type: The type of the ``Content-Disposition``. Either ``inline`` or ``attachment``.
120 etag: An optional :class:`ETag <.datastructures.ETag>` instance. If not provided, an etag will be
121 generated.
122 file_system: An implementation of the :class:`FileSystemProtocol <.types.FileSystemProtocol>`. If provided
123 it will be used to load the file.
124 file_info: The output of calling :meth:`file_system.info <types.FileSystemProtocol.info>`, equivalent to
125 providing an :class:`os.stat_result`.
126 """
127 if not media_type:
128 mimetype, _ = guess_type(filename) if filename else (None, None)
129 media_type = mimetype or "application/octet-stream"
130
131 self.chunk_size = chunk_size
132 self.content_disposition_type = content_disposition_type
133 self.etag = etag
134 self.file_path = path
135 self.filename = filename or ""
136 self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())
137
138 super().__init__(
139 content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),
140 status_code=status_code,
141 media_type=media_type,
142 background=background,
143 headers=headers,
144 cookies=cookies,
145 encoding=encoding,
146 is_head_response=is_head_response,
147 )
148
149 if file_info:
150 self.file_info: FileInfo | Coroutine[Any, Any, FileInfo] = file_info
151 elif stat_result:
152 self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)
153 else:
154 self.file_info = self.adapter.info(self.file_path)
155
156 @property
157 def content_disposition(self) -> str:
158 """Content disposition.
159
160 Returns:
161 A value for the ``Content-Disposition`` header.
162 """
163 quoted_filename = quote(self.filename)
164 is_utf8 = quoted_filename == self.filename
165 if is_utf8:
166 return f'{self.content_disposition_type}; filename="{self.filename}"'
167 return f"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}"
168
169 @property
170 def content_length(self) -> int:
171 """Content length of the response if applicable.
172
173 Returns:
174 Returns the value of :attr:`stat_result.st_size <os.stat_result.st_size>` to populate the ``Content-Length``
175 header.
176 """
177 if isinstance(self.file_info, dict):
178 return self.file_info["size"]
179 return 0
180
181 async def send_body(self, send: Send, receive: Receive) -> None:
182 """Emit a stream of events correlating with the response body.
183
184 Args:
185 send: The ASGI send function.
186 receive: The ASGI receive function.
187
188 Returns:
189 None
190 """
191 if self.chunk_size < self.content_length:
192 await super().send_body(send=send, receive=receive)
193 return
194
195 async with await self.adapter.open(self.file_path) as file:
196 body_event: HTTPResponseBodyEvent = {
197 "type": "http.response.body",
198 "body": await file.read(),
199 "more_body": False,
200 }
201 await send(body_event)
202
203 async def start_response(self, send: Send) -> None:
204 """Emit the start event of the response. This event includes the headers and status codes.
205
206 Args:
207 send: The ASGI send function.
208
209 Returns:
210 None
211 """
212 try:
213 fs_info = self.file_info = cast(
214 "FileInfo", (await self.file_info if iscoroutine(self.file_info) else self.file_info)
215 )
216 except FileNotFoundError as e:
217 raise ImproperlyConfiguredException(f"{self.file_path} does not exist") from e
218
219 if fs_info["type"] != "file":
220 raise ImproperlyConfiguredException(f"{self.file_path} is not a file")
221
222 self.set_header("last-modified", formatdate(fs_info["mtime"], usegmt=True))
223 self.set_header("content-disposition", self.content_disposition)
224 self.set_etag(
225 self.etag
226 or create_etag_for_file(path=self.file_path, modified_time=fs_info["mtime"], file_size=fs_info["size"])
227 )
228
229 await super().start_response(send=send)
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/response/file.py b/litestar/response/file.py
--- a/litestar/response/file.py
+++ b/litestar/response/file.py
@@ -2,7 +2,7 @@
from email.utils import formatdate
from inspect import iscoroutine
-from mimetypes import guess_type
+from mimetypes import encodings_map, guess_type
from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Literal, cast
from urllib.parse import quote
from zlib import adler32
@@ -28,6 +28,9 @@
from litestar.types import HTTPResponseBodyEvent, PathType, Receive, ResponseCookies, Send
from litestar.types.file_types import FileInfo, FileSystemProtocol
+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.
+encodings_map[".br"] = "br"
+
async def async_file_iterator(
file_path: PathType, chunk_size: int, adapter: FileSystemAdapter
@@ -125,8 +128,11 @@
providing an :class:`os.stat_result`.
"""
if not media_type:
- mimetype, _ = guess_type(filename) if filename else (None, None)
+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)
media_type = mimetype or "application/octet-stream"
+ if content_encoding is not None:
+ headers = headers or {}
+ headers.update({"content-encoding": content_encoding})
self.chunk_size = chunk_size
self.content_disposition_type = content_disposition_type
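The new branch relies only on the standard library: `mimetypes.guess_type()` reports the compression suffix of a file as its encoding, and the patch forwards that value as a `content-encoding` response header (registering `.br` by hand for Pythons older than 3.9). A minimal standalone sketch of that behaviour follows; the helper function and the merged header dict are illustrative only, not litestar API:

```
# Sketch of the logic the patch adds to FileResponse.__init__ (illustrative only).
from mimetypes import encodings_map, guess_type

# Mirror the patch: brotli is missing from 'mimetypes.encodings_map' before Python 3.9.
encodings_map[".br"] = "br"

def headers_for(filename: str) -> dict:
    # guess_type() returns (media type, encoding); for "styles.css.gz" that is
    # ("text/css", "gzip"), so the compressed file keeps its real media type.
    mimetype, content_encoding = guess_type(filename)
    headers = {"content-type": mimetype or "application/octet-stream"}
    if content_encoding is not None:
        headers["content-encoding"] = content_encoding
    return headers

print(headers_for("styles.css.gz"))  # {'content-type': 'text/css', 'content-encoding': 'gzip'}
print(headers_for("bundle.js.br"))   # js media type plus {'content-encoding': 'br'}
```

In the actual patch the media type is still passed separately to the parent class; only the `content-encoding` entry is merged into the user-supplied `headers` dict.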
| {"golden_diff": "diff --git a/litestar/response/file.py b/litestar/response/file.py\n--- a/litestar/response/file.py\n+++ b/litestar/response/file.py\n@@ -2,7 +2,7 @@\n \n from email.utils import formatdate\n from inspect import iscoroutine\n-from mimetypes import guess_type\n+from mimetypes import encodings_map, guess_type\n from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Literal, cast\n from urllib.parse import quote\n from zlib import adler32\n@@ -28,6 +28,9 @@\n from litestar.types import HTTPResponseBodyEvent, PathType, Receive, ResponseCookies, Send\n from litestar.types.file_types import FileInfo, FileSystemProtocol\n \n+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.\n+encodings_map[\".br\"] = \"br\"\n+\n \n async def async_file_iterator(\n file_path: PathType, chunk_size: int, adapter: FileSystemAdapter\n@@ -125,8 +128,11 @@\n providing an :class:`os.stat_result`.\n \"\"\"\n if not media_type:\n- mimetype, _ = guess_type(filename) if filename else (None, None)\n+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n+ if content_encoding is not None:\n+ headers = headers or {}\n+ headers.update({\"content-encoding\": content_encoding})\n \n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n", "issue": "Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files\n### Description\n\nWhen using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.\r\n\r\nI will provide a PR with a test.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n_No response_\n\n### Screenshots\n\n```bash\n\"\"\n```\n\n\n### Logs\n\n_No response_\n\n### Litestar Version\n\n1.51.10\n\n### Platform\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom email.utils import formatdate\nfrom inspect import iscoroutine\nfrom mimetypes import guess_type\nfrom typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Literal, cast\nfrom urllib.parse import quote\nfrom zlib import adler32\n\nfrom litestar.constants import ONE_MEGABYTE\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.file_system import BaseLocalFileSystem, FileSystemAdapter\nfrom litestar.response.streaming import StreamingResponse\nfrom litestar.status_codes import HTTP_200_OK\n\n__all__ = (\"FileResponse\", \"async_file_iterator\", \"create_etag_for_file\")\n\n\nif TYPE_CHECKING:\n from os import PathLike\n from os import stat_result as stat_result_type\n\n from anyio import Path\n\n from litestar.background_tasks import BackgroundTask, BackgroundTasks\n from litestar.datastructures.headers import ETag\n from litestar.enums import MediaType\n from litestar.types import HTTPResponseBodyEvent, PathType, Receive, ResponseCookies, Send\n from litestar.types.file_types import FileInfo, FileSystemProtocol\n\n\nasync def async_file_iterator(\n file_path: PathType, chunk_size: int, adapter: FileSystemAdapter\n) -> AsyncGenerator[bytes, None]:\n \"\"\"Return an async that asynchronously reads a file and yields its chunks.\n\n Args:\n file_path: A path to a file.\n chunk_size: The chunk file to use.\n 
adapter: File system adapter class.\n adapter: File system adapter class.\n\n Returns:\n An async generator.\n \"\"\"\n async with await adapter.open(file_path) as file:\n while chunk := await file.read(chunk_size):\n yield chunk\n\n\ndef create_etag_for_file(path: PathType, modified_time: float, file_size: int) -> str:\n \"\"\"Create an etag.\n\n Notes:\n - Function is derived from flask.\n\n Returns:\n An etag.\n \"\"\"\n check = adler32(str(path).encode(\"utf-8\")) & 0xFFFFFFFF\n return f'\"{modified_time}-{file_size}-{check}\"'\n\n\nclass FileResponse(StreamingResponse):\n \"\"\"A response, streaming a file as response body.\"\"\"\n\n __slots__ = (\n \"chunk_size\",\n \"content_disposition_type\",\n \"etag\",\n \"file_path\",\n \"filename\",\n \"adapter\",\n \"file_info\",\n )\n\n def __init__(\n self,\n path: str | PathLike | Path,\n *,\n background: BackgroundTask | BackgroundTasks | None = None,\n chunk_size: int = ONE_MEGABYTE,\n content_disposition_type: Literal[\"attachment\", \"inline\"] = \"attachment\",\n cookies: ResponseCookies | None = None,\n encoding: str = \"utf-8\",\n etag: ETag | None = None,\n file_system: FileSystemProtocol | None = None,\n filename: str | None = None,\n file_info: FileInfo | None = None,\n headers: dict[str, Any] | None = None,\n is_head_response: bool = False,\n media_type: Literal[MediaType.TEXT] | str | None = None,\n stat_result: stat_result_type | None = None,\n status_code: int = HTTP_200_OK,\n ) -> None:\n \"\"\"Initialize ``FileResponse``\n\n Notes:\n - This class extends the :class:`StreamingResponse <.response.StreamingResponse>` class.\n\n Args:\n path: A file path in one of the supported formats.\n status_code: An HTTP status code.\n media_type: A value for the response ``Content-Type`` header. If not provided, the value will be either\n derived from the filename if provided and supported by the stdlib, or will default to\n ``application/octet-stream``.\n background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or\n :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of :class:`Cookie <.datastructures.Cookie>` instances to be set under the response\n ``Set-Cookie`` header.\n encoding: The encoding to be used for the response headers.\n is_head_response: Whether the response should send only the headers (\"head\" request) or also the content.\n filename: An optional filename to set in the header.\n stat_result: An optional result of calling :func:os.stat:. If not provided, this will be done by the\n response constructor.\n chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.\n content_disposition_type: The type of the ``Content-Disposition``. Either ``inline`` or ``attachment``.\n etag: An optional :class:`ETag <.datastructures.ETag>` instance. If not provided, an etag will be\n generated.\n file_system: An implementation of the :class:`FileSystemProtocol <.types.FileSystemProtocol>`. 
If provided\n it will be used to load the file.\n file_info: The output of calling :meth:`file_system.info <types.FileSystemProtocol.info>`, equivalent to\n providing an :class:`os.stat_result`.\n \"\"\"\n if not media_type:\n mimetype, _ = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n\n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n self.etag = etag\n self.file_path = path\n self.filename = filename or \"\"\n self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())\n\n super().__init__(\n content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),\n status_code=status_code,\n media_type=media_type,\n background=background,\n headers=headers,\n cookies=cookies,\n encoding=encoding,\n is_head_response=is_head_response,\n )\n\n if file_info:\n self.file_info: FileInfo | Coroutine[Any, Any, FileInfo] = file_info\n elif stat_result:\n self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)\n else:\n self.file_info = self.adapter.info(self.file_path)\n\n @property\n def content_disposition(self) -> str:\n \"\"\"Content disposition.\n\n Returns:\n A value for the ``Content-Disposition`` header.\n \"\"\"\n quoted_filename = quote(self.filename)\n is_utf8 = quoted_filename == self.filename\n if is_utf8:\n return f'{self.content_disposition_type}; filename=\"{self.filename}\"'\n return f\"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}\"\n\n @property\n def content_length(self) -> int:\n \"\"\"Content length of the response if applicable.\n\n Returns:\n Returns the value of :attr:`stat_result.st_size <os.stat_result.st_size>` to populate the ``Content-Length``\n header.\n \"\"\"\n if isinstance(self.file_info, dict):\n return self.file_info[\"size\"]\n return 0\n\n async def send_body(self, send: Send, receive: Receive) -> None:\n \"\"\"Emit a stream of events correlating with the response body.\n\n Args:\n send: The ASGI send function.\n receive: The ASGI receive function.\n\n Returns:\n None\n \"\"\"\n if self.chunk_size < self.content_length:\n await super().send_body(send=send, receive=receive)\n return\n\n async with await self.adapter.open(self.file_path) as file:\n body_event: HTTPResponseBodyEvent = {\n \"type\": \"http.response.body\",\n \"body\": await file.read(),\n \"more_body\": False,\n }\n await send(body_event)\n\n async def start_response(self, send: Send) -> None:\n \"\"\"Emit the start event of the response. 
This event includes the headers and status codes.\n\n Args:\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n fs_info = self.file_info = cast(\n \"FileInfo\", (await self.file_info if iscoroutine(self.file_info) else self.file_info)\n )\n except FileNotFoundError as e:\n raise ImproperlyConfiguredException(f\"{self.file_path} does not exist\") from e\n\n if fs_info[\"type\"] != \"file\":\n raise ImproperlyConfiguredException(f\"{self.file_path} is not a file\")\n\n self.set_header(\"last-modified\", formatdate(fs_info[\"mtime\"], usegmt=True))\n self.set_header(\"content-disposition\", self.content_disposition)\n self.set_etag(\n self.etag\n or create_etag_for_file(path=self.file_path, modified_time=fs_info[\"mtime\"], file_size=fs_info[\"size\"])\n )\n\n await super().start_response(send=send)\n", "path": "litestar/response/file.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom email.utils import formatdate\nfrom inspect import iscoroutine\nfrom mimetypes import encodings_map, guess_type\nfrom typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Literal, cast\nfrom urllib.parse import quote\nfrom zlib import adler32\n\nfrom litestar.constants import ONE_MEGABYTE\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.file_system import BaseLocalFileSystem, FileSystemAdapter\nfrom litestar.response.streaming import StreamingResponse\nfrom litestar.status_codes import HTTP_200_OK\n\n__all__ = (\"FileResponse\", \"async_file_iterator\", \"create_etag_for_file\")\n\n\nif TYPE_CHECKING:\n from os import PathLike\n from os import stat_result as stat_result_type\n\n from anyio import Path\n\n from litestar.background_tasks import BackgroundTask, BackgroundTasks\n from litestar.datastructures.headers import ETag\n from litestar.enums import MediaType\n from litestar.types import HTTPResponseBodyEvent, PathType, Receive, ResponseCookies, Send\n from litestar.types.file_types import FileInfo, FileSystemProtocol\n\n# brotli not supported in 'mimetypes.encodings_map' until py 3.9.\nencodings_map[\".br\"] = \"br\"\n\n\nasync def async_file_iterator(\n file_path: PathType, chunk_size: int, adapter: FileSystemAdapter\n) -> AsyncGenerator[bytes, None]:\n \"\"\"Return an async that asynchronously reads a file and yields its chunks.\n\n Args:\n file_path: A path to a file.\n chunk_size: The chunk file to use.\n adapter: File system adapter class.\n adapter: File system adapter class.\n\n Returns:\n An async generator.\n \"\"\"\n async with await adapter.open(file_path) as file:\n while chunk := await file.read(chunk_size):\n yield chunk\n\n\ndef create_etag_for_file(path: PathType, modified_time: float, file_size: int) -> str:\n \"\"\"Create an etag.\n\n Notes:\n - Function is derived from flask.\n\n Returns:\n An etag.\n \"\"\"\n check = adler32(str(path).encode(\"utf-8\")) & 0xFFFFFFFF\n return f'\"{modified_time}-{file_size}-{check}\"'\n\n\nclass FileResponse(StreamingResponse):\n \"\"\"A response, streaming a file as response body.\"\"\"\n\n __slots__ = (\n \"chunk_size\",\n \"content_disposition_type\",\n \"etag\",\n \"file_path\",\n \"filename\",\n \"adapter\",\n \"file_info\",\n )\n\n def __init__(\n self,\n path: str | PathLike | Path,\n *,\n background: BackgroundTask | BackgroundTasks | None = None,\n chunk_size: int = ONE_MEGABYTE,\n content_disposition_type: Literal[\"attachment\", \"inline\"] = \"attachment\",\n cookies: ResponseCookies | None = None,\n encoding: str = \"utf-8\",\n etag: ETag | None = None,\n 
file_system: FileSystemProtocol | None = None,\n filename: str | None = None,\n file_info: FileInfo | None = None,\n headers: dict[str, Any] | None = None,\n is_head_response: bool = False,\n media_type: Literal[MediaType.TEXT] | str | None = None,\n stat_result: stat_result_type | None = None,\n status_code: int = HTTP_200_OK,\n ) -> None:\n \"\"\"Initialize ``FileResponse``\n\n Notes:\n - This class extends the :class:`StreamingResponse <.response.StreamingResponse>` class.\n\n Args:\n path: A file path in one of the supported formats.\n status_code: An HTTP status code.\n media_type: A value for the response ``Content-Type`` header. If not provided, the value will be either\n derived from the filename if provided and supported by the stdlib, or will default to\n ``application/octet-stream``.\n background: A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or\n :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of :class:`Cookie <.datastructures.Cookie>` instances to be set under the response\n ``Set-Cookie`` header.\n encoding: The encoding to be used for the response headers.\n is_head_response: Whether the response should send only the headers (\"head\" request) or also the content.\n filename: An optional filename to set in the header.\n stat_result: An optional result of calling :func:os.stat:. If not provided, this will be done by the\n response constructor.\n chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.\n content_disposition_type: The type of the ``Content-Disposition``. Either ``inline`` or ``attachment``.\n etag: An optional :class:`ETag <.datastructures.ETag>` instance. If not provided, an etag will be\n generated.\n file_system: An implementation of the :class:`FileSystemProtocol <.types.FileSystemProtocol>`. 
If provided\n it will be used to load the file.\n file_info: The output of calling :meth:`file_system.info <types.FileSystemProtocol.info>`, equivalent to\n providing an :class:`os.stat_result`.\n \"\"\"\n if not media_type:\n mimetype, content_encoding = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n if content_encoding is not None:\n headers = headers or {}\n headers.update({\"content-encoding\": content_encoding})\n\n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n self.etag = etag\n self.file_path = path\n self.filename = filename or \"\"\n self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())\n\n super().__init__(\n content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),\n status_code=status_code,\n media_type=media_type,\n background=background,\n headers=headers,\n cookies=cookies,\n encoding=encoding,\n is_head_response=is_head_response,\n )\n\n if file_info:\n self.file_info: FileInfo | Coroutine[Any, Any, FileInfo] = file_info\n elif stat_result:\n self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)\n else:\n self.file_info = self.adapter.info(self.file_path)\n\n @property\n def content_disposition(self) -> str:\n \"\"\"Content disposition.\n\n Returns:\n A value for the ``Content-Disposition`` header.\n \"\"\"\n quoted_filename = quote(self.filename)\n is_utf8 = quoted_filename == self.filename\n if is_utf8:\n return f'{self.content_disposition_type}; filename=\"{self.filename}\"'\n return f\"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}\"\n\n @property\n def content_length(self) -> int:\n \"\"\"Content length of the response if applicable.\n\n Returns:\n Returns the value of :attr:`stat_result.st_size <os.stat_result.st_size>` to populate the ``Content-Length``\n header.\n \"\"\"\n if isinstance(self.file_info, dict):\n return self.file_info[\"size\"]\n return 0\n\n async def send_body(self, send: Send, receive: Receive) -> None:\n \"\"\"Emit a stream of events correlating with the response body.\n\n Args:\n send: The ASGI send function.\n receive: The ASGI receive function.\n\n Returns:\n None\n \"\"\"\n if self.chunk_size < self.content_length:\n await super().send_body(send=send, receive=receive)\n return\n\n async with await self.adapter.open(self.file_path) as file:\n body_event: HTTPResponseBodyEvent = {\n \"type\": \"http.response.body\",\n \"body\": await file.read(),\n \"more_body\": False,\n }\n await send(body_event)\n\n async def start_response(self, send: Send) -> None:\n \"\"\"Emit the start event of the response. 
This event includes the headers and status codes.\n\n Args:\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n fs_info = self.file_info = cast(\n \"FileInfo\", (await self.file_info if iscoroutine(self.file_info) else self.file_info)\n )\n except FileNotFoundError as e:\n raise ImproperlyConfiguredException(f\"{self.file_path} does not exist\") from e\n\n if fs_info[\"type\"] != \"file\":\n raise ImproperlyConfiguredException(f\"{self.file_path} is not a file\")\n\n self.set_header(\"last-modified\", formatdate(fs_info[\"mtime\"], usegmt=True))\n self.set_header(\"content-disposition\", self.content_disposition)\n self.set_etag(\n self.etag\n or create_etag_for_file(path=self.file_path, modified_time=fs_info[\"mtime\"], file_size=fs_info[\"size\"])\n )\n\n await super().start_response(send=send)\n", "path": "litestar/response/file.py"}]} | 2,947 | 345 |
gh_patches_debug_36434 | rasdani/github-patches | git_diff | team-ocean__veros-70 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow for custom setup folders
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `veros/cli/veros_copy_setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import shutil
5 import pkg_resources
6 import functools
7
8 import click
9
10 SETUPDIR = pkg_resources.resource_filename('veros', 'setup')
11 SETUPS = sorted([
12 setup for setup in os.listdir(SETUPDIR)
13 if os.path.isdir(os.path.join(SETUPDIR, setup))
14 and not setup.startswith('_')
15 ])
16 IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']
17
18
19 def copy_setup(setup, to=None):
20 """Copy a standard setup to another directory"""
21 if to is None:
22 to = os.path.join(os.getcwd(), setup)
23
24 parent = os.path.dirname(os.path.realpath(to))
25
26 if not os.path.exists(parent):
27 os.makedirs(parent)
28
29 ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)
30 shutil.copytree(
31 os.path.join(SETUPDIR, setup), to, ignore=ignore
32 )
33
34
35 @click.command('veros-copy-setup')
36 @click.argument('setup', type=click.Choice(SETUPS), metavar='SETUP')
37 @click.option('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,
38 default=None, help='Target directory (default: copy to current working directory)')
39 @functools.wraps(copy_setup)
40 def cli(*args, **kwargs):
41 copy_setup(*args, **kwargs)
42
43
44 if __name__ == '__main__':
45 cli()
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/veros/cli/veros_copy_setup.py b/veros/cli/veros_copy_setup.py
--- a/veros/cli/veros_copy_setup.py
+++ b/veros/cli/veros_copy_setup.py
@@ -7,35 +7,69 @@
import click
-SETUPDIR = pkg_resources.resource_filename('veros', 'setup')
-SETUPS = sorted([
- setup for setup in os.listdir(SETUPDIR)
- if os.path.isdir(os.path.join(SETUPDIR, setup))
- and not setup.startswith('_')
-])
+SETUPDIR_ENVVAR = 'VEROS_SETUP_DIR'
IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']
+SETUPS = {}
+
+setup_dirs = [pkg_resources.resource_filename('veros', 'setup')]
+
+for setup_dir in os.environ.get(SETUPDIR_ENVVAR, '').split(';'):
+ if os.path.isdir(setup_dir):
+ setup_dirs.append(setup_dir)
+
+# populate {setup_name: path} mapping
+for setup_dir in setup_dirs:
+ for setup in os.listdir(setup_dir):
+ setup_path = os.path.join(setup_dir, setup)
+ if not os.path.isdir(setup_path):
+ continue
+ if setup.startswith(('_', '.')):
+ continue
+ SETUPS[setup] = setup_path
+
+SETUP_NAMES = sorted(SETUPS.keys())
def copy_setup(setup, to=None):
- """Copy a standard setup to another directory"""
+ """Copy a standard setup to another directory.
+
+ Argument must be one of: {setups}
+
+ Example:
+
+ $ veros copy-setup global_4deg --to ~/veros-setups/4deg-lowfric
+
+ Further directories containing setup templates can be added to this command
+ via the {setup_envvar} environment variable.
+ """
if to is None:
to = os.path.join(os.getcwd(), setup)
- parent = os.path.dirname(os.path.realpath(to))
+ if os.path.exists(to):
+ raise RuntimeError('Target directory must not exist')
+
+ to_parent = os.path.dirname(os.path.realpath(to))
- if not os.path.exists(parent):
- os.makedirs(parent)
+ if not os.path.exists(to_parent):
+ os.makedirs(to_parent)
ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)
shutil.copytree(
- os.path.join(SETUPDIR, setup), to, ignore=ignore
+ SETUPS[setup], to, ignore=ignore
)
+copy_setup.__doc__ = copy_setup.__doc__.format(
+ setups=', '.join(SETUP_NAMES), setup_envvar=SETUPDIR_ENVVAR
+)
+
+
@click.command('veros-copy-setup')
[email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP')
[email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,
- default=None, help='Target directory (default: copy to current working directory)')
[email protected]('setup', type=click.Choice(SETUP_NAMES), metavar='SETUP')
[email protected]('--to', required=False, default=None,
+ type=click.Path(dir_okay=False, file_okay=False, writable=True),
+ help=('Target directory, must not exist '
+ '(default: copy to current working directory)'))
@functools.wraps(copy_setup)
def cli(*args, **kwargs):
copy_setup(*args, **kwargs)
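With this change the list of available setups is no longer hard-wired to the package's own `setup` directory: any directory listed in the `VEROS_SETUP_DIR` environment variable (multiple entries separated by `;`) is scanned as well, and `veros copy-setup` refuses to overwrite an existing target. A condensed sketch of the discovery step, with hypothetical paths, is shown below:

```
# Condensed sketch of the setup discovery introduced by the patch (paths are hypothetical).
import os

SETUPDIR_ENVVAR = "VEROS_SETUP_DIR"

def discover_setups(builtin_dir):
    setup_dirs = [builtin_dir]
    # Extra template directories come from the environment, separated by ';'.
    for extra_dir in os.environ.get(SETUPDIR_ENVVAR, "").split(";"):
        if os.path.isdir(extra_dir):
            setup_dirs.append(extra_dir)

    setups = {}
    for setup_dir in setup_dirs:
        for name in os.listdir(setup_dir):
            path = os.path.join(setup_dir, name)
            # Skip plain files and private/hidden entries, as the patch does.
            if os.path.isdir(path) and not name.startswith(("_", ".")):
                setups[name] = path
    return setups

# Hypothetical usage:
#   export VEROS_SETUP_DIR="$HOME/my-veros-setups;/shared/veros-templates"
#   veros copy-setup my_custom_setup --to ./runs/my_custom_setup
```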
| {"golden_diff": "diff --git a/veros/cli/veros_copy_setup.py b/veros/cli/veros_copy_setup.py\n--- a/veros/cli/veros_copy_setup.py\n+++ b/veros/cli/veros_copy_setup.py\n@@ -7,35 +7,69 @@\n \n import click\n \n-SETUPDIR = pkg_resources.resource_filename('veros', 'setup')\n-SETUPS = sorted([\n- setup for setup in os.listdir(SETUPDIR)\n- if os.path.isdir(os.path.join(SETUPDIR, setup))\n- and not setup.startswith('_')\n-])\n+SETUPDIR_ENVVAR = 'VEROS_SETUP_DIR'\n IGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']\n+SETUPS = {}\n+\n+setup_dirs = [pkg_resources.resource_filename('veros', 'setup')]\n+\n+for setup_dir in os.environ.get(SETUPDIR_ENVVAR, '').split(';'):\n+ if os.path.isdir(setup_dir):\n+ setup_dirs.append(setup_dir)\n+\n+# populate {setup_name: path} mapping\n+for setup_dir in setup_dirs:\n+ for setup in os.listdir(setup_dir):\n+ setup_path = os.path.join(setup_dir, setup)\n+ if not os.path.isdir(setup_path):\n+ continue\n+ if setup.startswith(('_', '.')):\n+ continue\n+ SETUPS[setup] = setup_path\n+\n+SETUP_NAMES = sorted(SETUPS.keys())\n \n \n def copy_setup(setup, to=None):\n- \"\"\"Copy a standard setup to another directory\"\"\"\n+ \"\"\"Copy a standard setup to another directory.\n+\n+ Argument must be one of: {setups}\n+\n+ Example:\n+\n+ $ veros copy-setup global_4deg --to ~/veros-setups/4deg-lowfric\n+\n+ Further directories containing setup templates can be added to this command\n+ via the {setup_envvar} environment variable.\n+ \"\"\"\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n \n- parent = os.path.dirname(os.path.realpath(to))\n+ if os.path.exists(to):\n+ raise RuntimeError('Target directory must not exist')\n+\n+ to_parent = os.path.dirname(os.path.realpath(to))\n \n- if not os.path.exists(parent):\n- os.makedirs(parent)\n+ if not os.path.exists(to_parent):\n+ os.makedirs(to_parent)\n \n ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n- os.path.join(SETUPDIR, setup), to, ignore=ignore\n+ SETUPS[setup], to, ignore=ignore\n )\n \n \n+copy_setup.__doc__ = copy_setup.__doc__.format(\n+ setups=', '.join(SETUP_NAMES), setup_envvar=SETUPDIR_ENVVAR\n+)\n+\n+\n @click.command('veros-copy-setup')\[email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP')\[email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,\n- default=None, help='Target directory (default: copy to current working directory)')\[email protected]('setup', type=click.Choice(SETUP_NAMES), metavar='SETUP')\[email protected]('--to', required=False, default=None,\n+ type=click.Path(dir_okay=False, file_okay=False, writable=True),\n+ help=('Target directory, must not exist '\n+ '(default: copy to current working directory)'))\n @functools.wraps(copy_setup)\n def cli(*args, **kwargs):\n copy_setup(*args, **kwargs)\n", "issue": "Allow for custom setup folders\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport pkg_resources\nimport functools\n\nimport click\n\nSETUPDIR = pkg_resources.resource_filename('veros', 'setup')\nSETUPS = sorted([\n setup for setup in os.listdir(SETUPDIR)\n if os.path.isdir(os.path.join(SETUPDIR, setup))\n and not setup.startswith('_')\n])\nIGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']\n\n\ndef copy_setup(setup, to=None):\n \"\"\"Copy a standard setup to another directory\"\"\"\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n\n parent = os.path.dirname(os.path.realpath(to))\n\n if not os.path.exists(parent):\n os.makedirs(parent)\n\n ignore = 
shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n os.path.join(SETUPDIR, setup), to, ignore=ignore\n )\n\n\[email protected]('veros-copy-setup')\[email protected]('setup', type=click.Choice(SETUPS), metavar='SETUP')\[email protected]('--to', type=click.Path(dir_okay=False, file_okay=False), required=False,\n default=None, help='Target directory (default: copy to current working directory)')\[email protected](copy_setup)\ndef cli(*args, **kwargs):\n copy_setup(*args, **kwargs)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "veros/cli/veros_copy_setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport shutil\nimport pkg_resources\nimport functools\n\nimport click\n\nSETUPDIR_ENVVAR = 'VEROS_SETUP_DIR'\nIGNORE_PATTERNS = ['__init__.py', '*.pyc', '__pycache__/']\nSETUPS = {}\n\nsetup_dirs = [pkg_resources.resource_filename('veros', 'setup')]\n\nfor setup_dir in os.environ.get(SETUPDIR_ENVVAR, '').split(';'):\n if os.path.isdir(setup_dir):\n setup_dirs.append(setup_dir)\n\n# populate {setup_name: path} mapping\nfor setup_dir in setup_dirs:\n for setup in os.listdir(setup_dir):\n setup_path = os.path.join(setup_dir, setup)\n if not os.path.isdir(setup_path):\n continue\n if setup.startswith(('_', '.')):\n continue\n SETUPS[setup] = setup_path\n\nSETUP_NAMES = sorted(SETUPS.keys())\n\n\ndef copy_setup(setup, to=None):\n \"\"\"Copy a standard setup to another directory.\n\n Argument must be one of: {setups}\n\n Example:\n\n $ veros copy-setup global_4deg --to ~/veros-setups/4deg-lowfric\n\n Further directories containing setup templates can be added to this command\n via the {setup_envvar} environment variable.\n \"\"\"\n if to is None:\n to = os.path.join(os.getcwd(), setup)\n\n if os.path.exists(to):\n raise RuntimeError('Target directory must not exist')\n\n to_parent = os.path.dirname(os.path.realpath(to))\n\n if not os.path.exists(to_parent):\n os.makedirs(to_parent)\n\n ignore = shutil.ignore_patterns(*IGNORE_PATTERNS)\n shutil.copytree(\n SETUPS[setup], to, ignore=ignore\n )\n\n\ncopy_setup.__doc__ = copy_setup.__doc__.format(\n setups=', '.join(SETUP_NAMES), setup_envvar=SETUPDIR_ENVVAR\n)\n\n\[email protected]('veros-copy-setup')\[email protected]('setup', type=click.Choice(SETUP_NAMES), metavar='SETUP')\[email protected]('--to', required=False, default=None,\n type=click.Path(dir_okay=False, file_okay=False, writable=True),\n help=('Target directory, must not exist '\n '(default: copy to current working directory)'))\[email protected](copy_setup)\ndef cli(*args, **kwargs):\n copy_setup(*args, **kwargs)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "veros/cli/veros_copy_setup.py"}]} | 663 | 786 |
gh_patches_debug_16480 | rasdani/github-patches | git_diff | cloudtools__troposphere-446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Validation of AutoScalingGroup with UpdatePolicy fails when maxSize is an expression
Troposphere version: `troposphere==1.5.0`
Snippet example:
```
t.add_resource(AutoScalingGroup(
"WebserverAutoScalingGroup",
AvailabilityZones=GetAZs(Ref("AWS::Region")),
LaunchConfigurationName=Ref(r['webserver_lc']),
LoadBalancerNames=[Ref(r['webserver_elb'])],
UpdatePolicy=UpdatePolicy(
AutoScalingRollingUpdate=AutoScalingRollingUpdate(
PauseTime='PT5M',
MinInstancesInService="1",
MaxBatchSize='1',
WaitOnResourceSignals=True
)
),
CreationPolicy=CreationPolicy(
ResourceSignal=ResourceSignal(
Timeout='PT15M'
)
),
HealthCheckGracePeriod=300,
HealthCheckType="EC2",
DesiredCapacity=If(
"IsStaging",
'1',
'3',
),
MinSize=If(
"IsStaging",
'1',
'3'
),
MaxSize=If(
"IsStaging",
'1',
'5'
)
))
```
This gives the error:
```
Traceback (most recent call last):
<-- snip -->
packages/troposphere/__init__.py", line 367, in default
return obj.JSONrepr()
File "venv/lib/python2.7/site-packages/troposphere/__init__.py", line 176, in JSONrepr
self.validate()
File "venv/lib/python2.7/site-packages/troposphere/autoscaling.py", line 147, in validate
maxCount = int(self.MaxSize)
TypeError: int() argument must be a string or a number, not 'If'
```
Let me know if you need any other details.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/autoscaling.py`
Content:
```
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap
7 from .validators import boolean, integer
8 from . import cloudformation
9
10
11 EC2_INSTANCE_LAUNCH = "autoscaling:EC2_INSTANCE_LAUNCH"
12 EC2_INSTANCE_LAUNCH_ERROR = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"
13 EC2_INSTANCE_TERMINATE = "autoscaling:EC2_INSTANCE_TERMINATE"
14 EC2_INSTANCE_TERMINATE_ERROR = "autoscaling:EC2_INSTANCE_TERMINATE_ERROR"
15 TEST_NOTIFICATION = "autoscaling:TEST_NOTIFICATION"
16
17 # Termination Policy constants
18 Default = 'Default'
19 OldestInstance = 'OldestInstance'
20 NewestInstance = 'NewestInstance'
21 OldestLaunchConfiguration = 'OldestLaunchConfiguration'
22 ClosestToNextInstanceHour = 'ClosestToNextInstanceHour'
23
24
25 class Tag(AWSHelperFn):
26 def __init__(self, key, value, propogate):
27 self.data = {
28 'Key': key,
29 'Value': value,
30 'PropagateAtLaunch': propogate,
31 }
32
33 def JSONrepr(self):
34 return self.data
35
36
37 class Tags(AWSHelperFn):
38 defaultPropagateAtLaunch = True
39 manyType = [type([]), type(())]
40
41 def __init__(self, **kwargs):
42 self.tags = []
43 for k, v in sorted(kwargs.iteritems()):
44 if type(v) in self.manyType:
45 propagate = str(v[1]).lower()
46 v = v[0]
47 else:
48 propagate = str(self.defaultPropagateAtLaunch).lower()
49 self.tags.append({
50 'Key': k,
51 'Value': v,
52 'PropagateAtLaunch': propagate,
53 })
54
55 # append tags to list
56 def __add__(self, newtags):
57 newtags.tags = self.tags + newtags.tags
58 return newtags
59
60 def JSONrepr(self):
61 return self.tags
62
63
64 class NotificationConfigurations(AWSProperty):
65 props = {
66 'TopicARN': (basestring, True),
67 'NotificationTypes': (list, True),
68 }
69
70
71 class MetricsCollection(AWSProperty):
72 props = {
73 'Granularity': (basestring, True),
74 'Metrics': (list, False),
75 }
76
77
78 class Metadata(AWSHelperFn):
79 def __init__(self, init, authentication=None):
80 self.validate(init, authentication)
81 # get keys and values from init and authentication
82
83 # if there's only one data point, then we know its the default
84 # cfn-init; where the key is 'config'
85 if len(init.data) == 1:
86 initKey, initValue = init.data.popitem()
87 self.data = {initKey: initValue}
88 else:
89 self.data = init.data
90
91 if authentication:
92 authKey, authValue = authentication.data.popitem()
93 self.data[authKey] = authValue
94
95 def validate(self, init, authentication):
96 if not isinstance(init, cloudformation.Init):
97 raise ValueError(
98 'init must be of type cloudformation.Init'
99 )
100
101 is_instance = isinstance(authentication, cloudformation.Authentication)
102 if authentication and not is_instance:
103 raise ValueError(
104 'authentication must be of type cloudformation.Authentication'
105 )
106
107 def JSONrepr(self):
108 return self.data
109
110
111 class AutoScalingGroup(AWSObject):
112 resource_type = "AWS::AutoScaling::AutoScalingGroup"
113
114 props = {
115 'AvailabilityZones': (list, False),
116 'Cooldown': (integer, False),
117 'DesiredCapacity': (integer, False),
118 'HealthCheckGracePeriod': (integer, False),
119 'HealthCheckType': (basestring, False),
120 'InstanceId': (basestring, False),
121 'LaunchConfigurationName': (basestring, False),
122 'LoadBalancerNames': (list, False),
123 'MaxSize': (integer, True),
124 'MetricsCollection': ([MetricsCollection], False),
125 'MinSize': (integer, True),
126 'NotificationConfigurations': ([NotificationConfigurations], False),
127 'PlacementGroup': (basestring, False),
128 'Tags': (list, False),
129 'TerminationPolicies': ([basestring], False),
130 'VPCZoneIdentifier': (list, False),
131 }
132
133 def validate(self):
134 if 'UpdatePolicy' in self.resource:
135 update_policy = self.resource['UpdatePolicy']
136
137 if 'AutoScalingRollingUpdate' in update_policy.properties:
138 rolling_update = update_policy.AutoScalingRollingUpdate
139
140 isMinNoCheck = isinstance(
141 rolling_update.MinInstancesInService,
142 (FindInMap, Ref)
143 )
144 isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref))
145
146 if not (isMinNoCheck or isMaxNoCheck):
147 maxCount = int(self.MaxSize)
148 minCount = int(rolling_update.MinInstancesInService)
149
150 if minCount >= maxCount:
151 raise ValueError(
152 "The UpdatePolicy attribute "
153 "MinInstancesInService must be less than the "
154 "autoscaling group's MaxSize")
155
156 launch_config = self.properties.get('LaunchConfigurationName')
157 instance_id = self.properties.get('InstanceId')
158 if launch_config and instance_id:
159 raise ValueError("LaunchConfigurationName and InstanceId "
160 "are mutually exclusive.")
161 if not launch_config and not instance_id:
162 raise ValueError("Must specify either LaunchConfigurationName or "
163 "InstanceId: http://docs.aws.amazon.com/AWSCloud"
164 "Formation/latest/UserGuide/aws-properties-as-gr"
165 "oup.html#cfn-as-group-instanceid")
166
167 availability_zones = self.properties.get('AvailabilityZones')
168 vpc_zone_identifier = self.properties.get('VPCZoneIdentifier')
169 if not availability_zones and not vpc_zone_identifier:
170 raise ValueError("Must specify AvailabilityZones and/or "
171 "VPCZoneIdentifier: http://docs.aws.amazon.com/A"
172 "WSCloudFormation/latest/UserGuide/aws-propertie"
173 "s-as-group.html#cfn-as-group-vpczoneidentifier")
174 return True
175
176
177 class LaunchConfiguration(AWSObject):
178 resource_type = "AWS::AutoScaling::LaunchConfiguration"
179
180 props = {
181 'AssociatePublicIpAddress': (boolean, False),
182 'BlockDeviceMappings': (list, False),
183 'ClassicLinkVPCId': (basestring, False),
184 'ClassicLinkVPCSecurityGroups': ([basestring], False),
185 'EbsOptimized': (boolean, False),
186 'IamInstanceProfile': (basestring, False),
187 'ImageId': (basestring, True),
188 'InstanceId': (basestring, False),
189 'InstanceMonitoring': (boolean, False),
190 'InstanceType': (basestring, True),
191 'KernelId': (basestring, False),
192 'KeyName': (basestring, False),
193 'Metadata': (Metadata, False),
194 'PlacementTenancy': (basestring, False),
195 'RamDiskId': (basestring, False),
196 'SecurityGroups': (list, False),
197 'SpotPrice': (basestring, False),
198 'UserData': (basestring, False),
199 }
200
201
202 class StepAdjustments(AWSProperty):
203 props = {
204 'MetricIntervalLowerBound': (integer, False),
205 'MetricIntervalUpperBound': (integer, False),
206 'ScalingAdjustment': (integer, True),
207 }
208
209
210 class ScalingPolicy(AWSObject):
211 resource_type = "AWS::AutoScaling::ScalingPolicy"
212
213 props = {
214 'AdjustmentType': (basestring, True),
215 'AutoScalingGroupName': (basestring, True),
216 'Cooldown': (integer, False),
217 'EstimatedInstanceWarmup': (integer, False),
218 'MetricAggregationType': (basestring, False),
219 'MinAdjustmentMagnitude': (integer, False),
220 'PolicyType': (basestring, False),
221 'ScalingAdjustment': (integer, False),
222 'StepAdjustments': ([StepAdjustments], False),
223 }
224
225
226 class ScheduledAction(AWSObject):
227 resource_type = "AWS::AutoScaling::ScheduledAction"
228
229 props = {
230 'AutoScalingGroupName': (basestring, True),
231 'DesiredCapacity': (integer, False),
232 'EndTime': (basestring, False),
233 'MaxSize': (integer, False),
234 'MinSize': (integer, False),
235 'Recurrence': (basestring, False),
236 'StartTime': (basestring, False),
237 }
238
239
240 class LifecycleHook(AWSObject):
241 resource_type = "AWS::AutoScaling::LifecycleHook"
242
243 props = {
244 'AutoScalingGroupName': (basestring, True),
245 'DefaultResult': (basestring, False),
246 'HeartbeatTimeout': (integer, False),
247 'LifecycleHookName': (basestring, False),
248 'LifecycleTransition': (basestring, True),
249 'NotificationMetadata': (basestring, False),
250 'NotificationTargetARN': (basestring, True),
251 'RoleARN': (basestring, True),
252 }
253
254
255 class Trigger(AWSObject):
256 resource_type = "AWS::AutoScaling::Trigger"
257
258 props = {
259 'AutoScalingGroupName': (basestring, True),
260 'BreachDuration': (integer, True),
261 'Dimensions': (list, True),
262 'LowerBreachScaleIncrement': (integer, False),
263 'LowerThreshold': (integer, True),
264 'MetricName': (basestring, True),
265 'Namespace': (basestring, True),
266 'Period': (integer, True),
267 'Statistic': (basestring, True),
268 'Unit': (basestring, False),
269 'UpperBreachScaleIncrement': (integer, False),
270 'UpperThreshold': (integer, True),
271 }
272
273
274 class EBSBlockDevice(AWSProperty):
275 # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html
276 props = {
277 'DeleteOnTermination': (boolean, False),
278 'Encrypted': (boolean, False),
279 'Iops': (integer, False),
280 'SnapshotId': (basestring, False),
281 'VolumeSize': (integer, False),
282 'VolumeType': (basestring, False),
283 }
284
285
286 class BlockDeviceMapping(AWSProperty):
287 # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html
288 props = {
289 'DeviceName': (basestring, True),
290 'Ebs': (EBSBlockDevice, False),
291 'NoDevice': (boolean, False),
292 'VirtualName': (basestring, False),
293 }
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/autoscaling.py b/troposphere/autoscaling.py
--- a/troposphere/autoscaling.py
+++ b/troposphere/autoscaling.py
@@ -3,7 +3,7 @@
#
# See LICENSE file for full license.
-from . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap
+from . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap, If
from .validators import boolean, integer
from . import cloudformation
@@ -141,7 +141,7 @@
rolling_update.MinInstancesInService,
(FindInMap, Ref)
)
- isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref))
+ isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref, If))
if not (isMinNoCheck or isMaxNoCheck):
maxCount = int(self.MaxSize)
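The fix treats `If` the same way the validator already treats `Ref` and `FindInMap`: values built from intrinsic functions cannot be coerced with `int()` at template-generation time, so the MinInstancesInService/MaxSize sanity check is skipped and left for CloudFormation to resolve. A small illustration of the patched guard follows (a sketch, not the library's own `validate()` method; it assumes `If` is importable from the package root, as the diff's relative import suggests):

```
# Sketch of the guard after the patch: intrinsic-function values bypass the int() check.
from troposphere import FindInMap, If, Ref

max_size = If("IsStaging", "1", "5")   # conditional MaxSize, as in the issue snippet
min_in_service = "1"                   # MinInstancesInService from the UpdatePolicy

is_min_no_check = isinstance(min_in_service, (FindInMap, Ref))
is_max_no_check = isinstance(max_size, (FindInMap, Ref, If))   # now True, so no int() call

if not (is_min_no_check or is_max_no_check):
    # Only reached when both values are plain numbers/strings.
    if int(min_in_service) >= int(max_size):
        raise ValueError("MinInstancesInService must be less than MaxSize")
```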
| {"golden_diff": "diff --git a/troposphere/autoscaling.py b/troposphere/autoscaling.py\n--- a/troposphere/autoscaling.py\n+++ b/troposphere/autoscaling.py\n@@ -3,7 +3,7 @@\n #\n # See LICENSE file for full license.\n \n-from . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap\n+from . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap, If\n from .validators import boolean, integer\n from . import cloudformation\n \n@@ -141,7 +141,7 @@\n rolling_update.MinInstancesInService,\n (FindInMap, Ref)\n )\n- isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref))\n+ isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref, If))\n \n if not (isMinNoCheck or isMaxNoCheck):\n maxCount = int(self.MaxSize)\n", "issue": "Validation of AutoScalingGroup with UpdatePolicy fails when maxSize is an expression\nTroposphere version: `troposphere==1.5.0`\nSnippet example:\n\n```\nt.add_resource(AutoScalingGroup(\n \"WebserverAutoScalingGroup\",\n AvailabilityZones=GetAZs(Ref(\"AWS::Region\")),\n LaunchConfigurationName=Ref(r['webserver_lc']),\n LoadBalancerNames=[Ref(r['webserver_elb'])],\n\n UpdatePolicy=UpdatePolicy(\n AutoScalingRollingUpdate=AutoScalingRollingUpdate(\n PauseTime='PT5M',\n MinInstancesInService=\"1\",\n MaxBatchSize='1',\n WaitOnResourceSignals=True\n )\n ),\n CreationPolicy=CreationPolicy(\n ResourceSignal=ResourceSignal(\n Timeout='PT15M'\n )\n ),\n HealthCheckGracePeriod=300,\n HealthCheckType=\"EC2\",\n\n DesiredCapacity=If(\n \"IsStaging\",\n '1',\n '3',\n ),\n MinSize=If(\n \"IsStaging\",\n '1',\n '3'\n ),\n MaxSize=If(\n \"IsStaging\",\n '1',\n '5'\n )\n ))\n```\n\nThis gives the error:\n\n```\nTraceback (most recent call last):\n<-- snip -->\npackages/troposphere/__init__.py\", line 367, in default\n return obj.JSONrepr()\n File \"venv/lib/python2.7/site-packages/troposphere/__init__.py\", line 176, in JSONrepr\n self.validate()\n File \"venv/lib/python2.7/site-packages/troposphere/autoscaling.py\", line 147, in validate\n maxCount = int(self.MaxSize)\nTypeError: int() argument must be a string or a number, not 'If'\n```\n\nLet me know if you need any other details.\n\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap\nfrom .validators import boolean, integer\nfrom . 
import cloudformation\n\n\nEC2_INSTANCE_LAUNCH = \"autoscaling:EC2_INSTANCE_LAUNCH\"\nEC2_INSTANCE_LAUNCH_ERROR = \"autoscaling:EC2_INSTANCE_LAUNCH_ERROR\"\nEC2_INSTANCE_TERMINATE = \"autoscaling:EC2_INSTANCE_TERMINATE\"\nEC2_INSTANCE_TERMINATE_ERROR = \"autoscaling:EC2_INSTANCE_TERMINATE_ERROR\"\nTEST_NOTIFICATION = \"autoscaling:TEST_NOTIFICATION\"\n\n# Termination Policy constants\nDefault = 'Default'\nOldestInstance = 'OldestInstance'\nNewestInstance = 'NewestInstance'\nOldestLaunchConfiguration = 'OldestLaunchConfiguration'\nClosestToNextInstanceHour = 'ClosestToNextInstanceHour'\n\n\nclass Tag(AWSHelperFn):\n def __init__(self, key, value, propogate):\n self.data = {\n 'Key': key,\n 'Value': value,\n 'PropagateAtLaunch': propogate,\n }\n\n def JSONrepr(self):\n return self.data\n\n\nclass Tags(AWSHelperFn):\n defaultPropagateAtLaunch = True\n manyType = [type([]), type(())]\n\n def __init__(self, **kwargs):\n self.tags = []\n for k, v in sorted(kwargs.iteritems()):\n if type(v) in self.manyType:\n propagate = str(v[1]).lower()\n v = v[0]\n else:\n propagate = str(self.defaultPropagateAtLaunch).lower()\n self.tags.append({\n 'Key': k,\n 'Value': v,\n 'PropagateAtLaunch': propagate,\n })\n\n # append tags to list\n def __add__(self, newtags):\n newtags.tags = self.tags + newtags.tags\n return newtags\n\n def JSONrepr(self):\n return self.tags\n\n\nclass NotificationConfigurations(AWSProperty):\n props = {\n 'TopicARN': (basestring, True),\n 'NotificationTypes': (list, True),\n }\n\n\nclass MetricsCollection(AWSProperty):\n props = {\n 'Granularity': (basestring, True),\n 'Metrics': (list, False),\n }\n\n\nclass Metadata(AWSHelperFn):\n def __init__(self, init, authentication=None):\n self.validate(init, authentication)\n # get keys and values from init and authentication\n\n # if there's only one data point, then we know its the default\n # cfn-init; where the key is 'config'\n if len(init.data) == 1:\n initKey, initValue = init.data.popitem()\n self.data = {initKey: initValue}\n else:\n self.data = init.data\n\n if authentication:\n authKey, authValue = authentication.data.popitem()\n self.data[authKey] = authValue\n\n def validate(self, init, authentication):\n if not isinstance(init, cloudformation.Init):\n raise ValueError(\n 'init must be of type cloudformation.Init'\n )\n\n is_instance = isinstance(authentication, cloudformation.Authentication)\n if authentication and not is_instance:\n raise ValueError(\n 'authentication must be of type cloudformation.Authentication'\n )\n\n def JSONrepr(self):\n return self.data\n\n\nclass AutoScalingGroup(AWSObject):\n resource_type = \"AWS::AutoScaling::AutoScalingGroup\"\n\n props = {\n 'AvailabilityZones': (list, False),\n 'Cooldown': (integer, False),\n 'DesiredCapacity': (integer, False),\n 'HealthCheckGracePeriod': (integer, False),\n 'HealthCheckType': (basestring, False),\n 'InstanceId': (basestring, False),\n 'LaunchConfigurationName': (basestring, False),\n 'LoadBalancerNames': (list, False),\n 'MaxSize': (integer, True),\n 'MetricsCollection': ([MetricsCollection], False),\n 'MinSize': (integer, True),\n 'NotificationConfigurations': ([NotificationConfigurations], False),\n 'PlacementGroup': (basestring, False),\n 'Tags': (list, False),\n 'TerminationPolicies': ([basestring], False),\n 'VPCZoneIdentifier': (list, False),\n }\n\n def validate(self):\n if 'UpdatePolicy' in self.resource:\n update_policy = self.resource['UpdatePolicy']\n\n if 'AutoScalingRollingUpdate' in update_policy.properties:\n rolling_update = 
update_policy.AutoScalingRollingUpdate\n\n isMinNoCheck = isinstance(\n rolling_update.MinInstancesInService,\n (FindInMap, Ref)\n )\n isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref))\n\n if not (isMinNoCheck or isMaxNoCheck):\n maxCount = int(self.MaxSize)\n minCount = int(rolling_update.MinInstancesInService)\n\n if minCount >= maxCount:\n raise ValueError(\n \"The UpdatePolicy attribute \"\n \"MinInstancesInService must be less than the \"\n \"autoscaling group's MaxSize\")\n\n launch_config = self.properties.get('LaunchConfigurationName')\n instance_id = self.properties.get('InstanceId')\n if launch_config and instance_id:\n raise ValueError(\"LaunchConfigurationName and InstanceId \"\n \"are mutually exclusive.\")\n if not launch_config and not instance_id:\n raise ValueError(\"Must specify either LaunchConfigurationName or \"\n \"InstanceId: http://docs.aws.amazon.com/AWSCloud\"\n \"Formation/latest/UserGuide/aws-properties-as-gr\"\n \"oup.html#cfn-as-group-instanceid\")\n\n availability_zones = self.properties.get('AvailabilityZones')\n vpc_zone_identifier = self.properties.get('VPCZoneIdentifier')\n if not availability_zones and not vpc_zone_identifier:\n raise ValueError(\"Must specify AvailabilityZones and/or \"\n \"VPCZoneIdentifier: http://docs.aws.amazon.com/A\"\n \"WSCloudFormation/latest/UserGuide/aws-propertie\"\n \"s-as-group.html#cfn-as-group-vpczoneidentifier\")\n return True\n\n\nclass LaunchConfiguration(AWSObject):\n resource_type = \"AWS::AutoScaling::LaunchConfiguration\"\n\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'BlockDeviceMappings': (list, False),\n 'ClassicLinkVPCId': (basestring, False),\n 'ClassicLinkVPCSecurityGroups': ([basestring], False),\n 'EbsOptimized': (boolean, False),\n 'IamInstanceProfile': (basestring, False),\n 'ImageId': (basestring, True),\n 'InstanceId': (basestring, False),\n 'InstanceMonitoring': (boolean, False),\n 'InstanceType': (basestring, True),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Metadata': (Metadata, False),\n 'PlacementTenancy': (basestring, False),\n 'RamDiskId': (basestring, False),\n 'SecurityGroups': (list, False),\n 'SpotPrice': (basestring, False),\n 'UserData': (basestring, False),\n }\n\n\nclass StepAdjustments(AWSProperty):\n props = {\n 'MetricIntervalLowerBound': (integer, False),\n 'MetricIntervalUpperBound': (integer, False),\n 'ScalingAdjustment': (integer, True),\n }\n\n\nclass ScalingPolicy(AWSObject):\n resource_type = \"AWS::AutoScaling::ScalingPolicy\"\n\n props = {\n 'AdjustmentType': (basestring, True),\n 'AutoScalingGroupName': (basestring, True),\n 'Cooldown': (integer, False),\n 'EstimatedInstanceWarmup': (integer, False),\n 'MetricAggregationType': (basestring, False),\n 'MinAdjustmentMagnitude': (integer, False),\n 'PolicyType': (basestring, False),\n 'ScalingAdjustment': (integer, False),\n 'StepAdjustments': ([StepAdjustments], False),\n }\n\n\nclass ScheduledAction(AWSObject):\n resource_type = \"AWS::AutoScaling::ScheduledAction\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'DesiredCapacity': (integer, False),\n 'EndTime': (basestring, False),\n 'MaxSize': (integer, False),\n 'MinSize': (integer, False),\n 'Recurrence': (basestring, False),\n 'StartTime': (basestring, False),\n }\n\n\nclass LifecycleHook(AWSObject):\n resource_type = \"AWS::AutoScaling::LifecycleHook\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'DefaultResult': (basestring, False),\n 'HeartbeatTimeout': (integer, False),\n 
'LifecycleHookName': (basestring, False),\n 'LifecycleTransition': (basestring, True),\n 'NotificationMetadata': (basestring, False),\n 'NotificationTargetARN': (basestring, True),\n 'RoleARN': (basestring, True),\n }\n\n\nclass Trigger(AWSObject):\n resource_type = \"AWS::AutoScaling::Trigger\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'BreachDuration': (integer, True),\n 'Dimensions': (list, True),\n 'LowerBreachScaleIncrement': (integer, False),\n 'LowerThreshold': (integer, True),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'Period': (integer, True),\n 'Statistic': (basestring, True),\n 'Unit': (basestring, False),\n 'UpperBreachScaleIncrement': (integer, False),\n 'UpperThreshold': (integer, True),\n }\n\n\nclass EBSBlockDevice(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (basestring, False),\n }\n\n\nclass BlockDeviceMapping(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html\n props = {\n 'DeviceName': (basestring, True),\n 'Ebs': (EBSBlockDevice, False),\n 'NoDevice': (boolean, False),\n 'VirtualName': (basestring, False),\n }\n", "path": "troposphere/autoscaling.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref, FindInMap, If\nfrom .validators import boolean, integer\nfrom . 
import cloudformation\n\n\nEC2_INSTANCE_LAUNCH = \"autoscaling:EC2_INSTANCE_LAUNCH\"\nEC2_INSTANCE_LAUNCH_ERROR = \"autoscaling:EC2_INSTANCE_LAUNCH_ERROR\"\nEC2_INSTANCE_TERMINATE = \"autoscaling:EC2_INSTANCE_TERMINATE\"\nEC2_INSTANCE_TERMINATE_ERROR = \"autoscaling:EC2_INSTANCE_TERMINATE_ERROR\"\nTEST_NOTIFICATION = \"autoscaling:TEST_NOTIFICATION\"\n\n# Termination Policy constants\nDefault = 'Default'\nOldestInstance = 'OldestInstance'\nNewestInstance = 'NewestInstance'\nOldestLaunchConfiguration = 'OldestLaunchConfiguration'\nClosestToNextInstanceHour = 'ClosestToNextInstanceHour'\n\n\nclass Tag(AWSHelperFn):\n def __init__(self, key, value, propogate):\n self.data = {\n 'Key': key,\n 'Value': value,\n 'PropagateAtLaunch': propogate,\n }\n\n def JSONrepr(self):\n return self.data\n\n\nclass Tags(AWSHelperFn):\n defaultPropagateAtLaunch = True\n manyType = [type([]), type(())]\n\n def __init__(self, **kwargs):\n self.tags = []\n for k, v in sorted(kwargs.iteritems()):\n if type(v) in self.manyType:\n propagate = str(v[1]).lower()\n v = v[0]\n else:\n propagate = str(self.defaultPropagateAtLaunch).lower()\n self.tags.append({\n 'Key': k,\n 'Value': v,\n 'PropagateAtLaunch': propagate,\n })\n\n # append tags to list\n def __add__(self, newtags):\n newtags.tags = self.tags + newtags.tags\n return newtags\n\n def JSONrepr(self):\n return self.tags\n\n\nclass NotificationConfigurations(AWSProperty):\n props = {\n 'TopicARN': (basestring, True),\n 'NotificationTypes': (list, True),\n }\n\n\nclass MetricsCollection(AWSProperty):\n props = {\n 'Granularity': (basestring, True),\n 'Metrics': (list, False),\n }\n\n\nclass Metadata(AWSHelperFn):\n def __init__(self, init, authentication=None):\n self.validate(init, authentication)\n # get keys and values from init and authentication\n\n # if there's only one data point, then we know its the default\n # cfn-init; where the key is 'config'\n if len(init.data) == 1:\n initKey, initValue = init.data.popitem()\n self.data = {initKey: initValue}\n else:\n self.data = init.data\n\n if authentication:\n authKey, authValue = authentication.data.popitem()\n self.data[authKey] = authValue\n\n def validate(self, init, authentication):\n if not isinstance(init, cloudformation.Init):\n raise ValueError(\n 'init must be of type cloudformation.Init'\n )\n\n is_instance = isinstance(authentication, cloudformation.Authentication)\n if authentication and not is_instance:\n raise ValueError(\n 'authentication must be of type cloudformation.Authentication'\n )\n\n def JSONrepr(self):\n return self.data\n\n\nclass AutoScalingGroup(AWSObject):\n resource_type = \"AWS::AutoScaling::AutoScalingGroup\"\n\n props = {\n 'AvailabilityZones': (list, False),\n 'Cooldown': (integer, False),\n 'DesiredCapacity': (integer, False),\n 'HealthCheckGracePeriod': (integer, False),\n 'HealthCheckType': (basestring, False),\n 'InstanceId': (basestring, False),\n 'LaunchConfigurationName': (basestring, False),\n 'LoadBalancerNames': (list, False),\n 'MaxSize': (integer, True),\n 'MetricsCollection': ([MetricsCollection], False),\n 'MinSize': (integer, True),\n 'NotificationConfigurations': ([NotificationConfigurations], False),\n 'PlacementGroup': (basestring, False),\n 'Tags': (list, False),\n 'TerminationPolicies': ([basestring], False),\n 'VPCZoneIdentifier': (list, False),\n }\n\n def validate(self):\n if 'UpdatePolicy' in self.resource:\n update_policy = self.resource['UpdatePolicy']\n\n if 'AutoScalingRollingUpdate' in update_policy.properties:\n rolling_update = 
update_policy.AutoScalingRollingUpdate\n\n isMinNoCheck = isinstance(\n rolling_update.MinInstancesInService,\n (FindInMap, Ref)\n )\n isMaxNoCheck = isinstance(self.MaxSize, (FindInMap, Ref, If))\n\n if not (isMinNoCheck or isMaxNoCheck):\n maxCount = int(self.MaxSize)\n minCount = int(rolling_update.MinInstancesInService)\n\n if minCount >= maxCount:\n raise ValueError(\n \"The UpdatePolicy attribute \"\n \"MinInstancesInService must be less than the \"\n \"autoscaling group's MaxSize\")\n\n launch_config = self.properties.get('LaunchConfigurationName')\n instance_id = self.properties.get('InstanceId')\n if launch_config and instance_id:\n raise ValueError(\"LaunchConfigurationName and InstanceId \"\n \"are mutually exclusive.\")\n if not launch_config and not instance_id:\n raise ValueError(\"Must specify either LaunchConfigurationName or \"\n \"InstanceId: http://docs.aws.amazon.com/AWSCloud\"\n \"Formation/latest/UserGuide/aws-properties-as-gr\"\n \"oup.html#cfn-as-group-instanceid\")\n\n availability_zones = self.properties.get('AvailabilityZones')\n vpc_zone_identifier = self.properties.get('VPCZoneIdentifier')\n if not availability_zones and not vpc_zone_identifier:\n raise ValueError(\"Must specify AvailabilityZones and/or \"\n \"VPCZoneIdentifier: http://docs.aws.amazon.com/A\"\n \"WSCloudFormation/latest/UserGuide/aws-propertie\"\n \"s-as-group.html#cfn-as-group-vpczoneidentifier\")\n return True\n\n\nclass LaunchConfiguration(AWSObject):\n resource_type = \"AWS::AutoScaling::LaunchConfiguration\"\n\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'BlockDeviceMappings': (list, False),\n 'ClassicLinkVPCId': (basestring, False),\n 'ClassicLinkVPCSecurityGroups': ([basestring], False),\n 'EbsOptimized': (boolean, False),\n 'IamInstanceProfile': (basestring, False),\n 'ImageId': (basestring, True),\n 'InstanceId': (basestring, False),\n 'InstanceMonitoring': (boolean, False),\n 'InstanceType': (basestring, True),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Metadata': (Metadata, False),\n 'PlacementTenancy': (basestring, False),\n 'RamDiskId': (basestring, False),\n 'SecurityGroups': (list, False),\n 'SpotPrice': (basestring, False),\n 'UserData': (basestring, False),\n }\n\n\nclass StepAdjustments(AWSProperty):\n props = {\n 'MetricIntervalLowerBound': (integer, False),\n 'MetricIntervalUpperBound': (integer, False),\n 'ScalingAdjustment': (integer, True),\n }\n\n\nclass ScalingPolicy(AWSObject):\n resource_type = \"AWS::AutoScaling::ScalingPolicy\"\n\n props = {\n 'AdjustmentType': (basestring, True),\n 'AutoScalingGroupName': (basestring, True),\n 'Cooldown': (integer, False),\n 'EstimatedInstanceWarmup': (integer, False),\n 'MetricAggregationType': (basestring, False),\n 'MinAdjustmentMagnitude': (integer, False),\n 'PolicyType': (basestring, False),\n 'ScalingAdjustment': (integer, False),\n 'StepAdjustments': ([StepAdjustments], False),\n }\n\n\nclass ScheduledAction(AWSObject):\n resource_type = \"AWS::AutoScaling::ScheduledAction\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'DesiredCapacity': (integer, False),\n 'EndTime': (basestring, False),\n 'MaxSize': (integer, False),\n 'MinSize': (integer, False),\n 'Recurrence': (basestring, False),\n 'StartTime': (basestring, False),\n }\n\n\nclass LifecycleHook(AWSObject):\n resource_type = \"AWS::AutoScaling::LifecycleHook\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'DefaultResult': (basestring, False),\n 'HeartbeatTimeout': (integer, False),\n 
'LifecycleHookName': (basestring, False),\n 'LifecycleTransition': (basestring, True),\n 'NotificationMetadata': (basestring, False),\n 'NotificationTargetARN': (basestring, True),\n 'RoleARN': (basestring, True),\n }\n\n\nclass Trigger(AWSObject):\n resource_type = \"AWS::AutoScaling::Trigger\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'BreachDuration': (integer, True),\n 'Dimensions': (list, True),\n 'LowerBreachScaleIncrement': (integer, False),\n 'LowerThreshold': (integer, True),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'Period': (integer, True),\n 'Statistic': (basestring, True),\n 'Unit': (basestring, False),\n 'UpperBreachScaleIncrement': (integer, False),\n 'UpperThreshold': (integer, True),\n }\n\n\nclass EBSBlockDevice(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (basestring, False),\n }\n\n\nclass BlockDeviceMapping(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html\n props = {\n 'DeviceName': (basestring, True),\n 'Ebs': (EBSBlockDevice, False),\n 'NoDevice': (boolean, False),\n 'VirtualName': (basestring, False),\n }\n", "path": "troposphere/autoscaling.py"}]} | 3,766 | 211 |
gh_patches_debug_6091 | rasdani/github-patches | git_diff | ARM-DOE__ACT-834 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GeographicPlotDisplay documentation missing description of return value
* ACT version: 2.1.1
* Python version: all
* Operating System: all
### Description
The GeographicPlotDisplay is missing a description of the returned matplotlib axes object. This proved a bit confusing when explaining to a student how to adjust the axes limits on the GeographicPlotDisplay, since doing so depends on calling the methods of the returned matplotlib axes object.
--- END ISSUE ---
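For illustration, a minimal sketch of the usage pattern described in the issue. The synthetic dataset, variable names, and extent values below are placeholders rather than real ACT data, and the sketch assumes ACT, xarray, pandas, matplotlib, and cartopy are installed:

```python
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from act.plotting import GeographicPlotDisplay

# Synthetic stand-in for real sonde data; field names are placeholders.
n = 20
ds = xr.Dataset(
    {
        "tdry": ("time", np.linspace(25.0, -5.0, n)),
        "lat": ("time", np.linspace(36.5, 36.8, n)),
        "lon": ("time", np.linspace(-97.6, -97.3, n)),
    },
    coords={"time": pd.date_range("2019-05-01", periods=n, freq="min")},
)
ds["tdry"].attrs.update(long_name="Dry bulb temperature", units="degC")

display = GeographicPlotDisplay({"example_ds": ds}, figsize=(10, 8))

# geoplot() returns the cartopy GeoAxes it drew on ...
ax = display.geoplot(data_field="tdry")

# ... so the map extent is adjusted through that returned handle,
# which is why the return value needs documenting.
ax.set_extent([-98.5, -96.5, 36.0, 37.5])  # lon_min, lon_max, lat_min, lat_max
plt.show()
```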
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/plotting/geodisplay.py`
Content:
```
1 """
2 Stores the class for GeographicPlotDisplay.
3
4 """
5
6
7 import matplotlib
8 import matplotlib.pyplot as plt
9 import numpy as np
10 import pandas as pd
11
12 from .plot import Display
13
14 try:
15 import cartopy.crs as ccrs
16 import cartopy.feature as cfeature
17 from cartopy.io import img_tiles
18
19 CARTOPY_AVAILABLE = True
20 except ImportError:
21 CARTOPY_AVAILABLE = False
22
23
24 class GeographicPlotDisplay(Display):
25 """
26 A class for making geographic tracer plot of aircraft, ship or other moving
27 platform plot.
28
29 This is inherited from the :func:`act.plotting.Display`
30 class and has therefore has the same attributes as that class.
31 See :func:`act.plotting.Display`
32 for more information. There are no additional attributes or parameters
33 to this class.
34
35 In order to create geographic plots, ACT needs the Cartopy package to be
36 installed on your system. More information about
37 Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
38
39 """
40
41 def __init__(self, ds, ds_name=None, **kwargs):
42 if not CARTOPY_AVAILABLE:
43 raise ImportError(
44 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
45 )
46 super().__init__(ds, ds_name, **kwargs)
47 if self.fig is None:
48 self.fig = plt.figure(**kwargs)
49
50 def geoplot(
51 self,
52 data_field=None,
53 lat_field='lat',
54 lon_field='lon',
55 dsname=None,
56 cbar_label=None,
57 title=None,
58 projection=None,
59 plot_buffer=0.08,
60 img_tile=None,
61 img_tile_args={},
62 tile=8,
63 cartopy_feature=None,
64 cmap='rainbow',
65 text=None,
66 gridlines=True,
67 **kwargs,
68 ):
69 """
70 Creates a latitude and longitude plot of a time series data set with
71 data values indicated by color and described with a colorbar.
72 Latitude values must be in degree north (-90 to 90) and
73 longitude must be in degree east (-180 to 180).
74
75 Parameters
76 ----------
77 data_field : str
78 Name of data field in the dataset to plot.
79 lat_field : str
80 Name of latitude field in the dataset to use.
81 lon_field : str
82 Name of longitude field in the dataset to use.
83 dsname : str or None
84 The name of the datastream to plot. Set to None to make ACT
85 attempt to automatically determine this.
86 cbar_label : str
87 Label to use with colorbar. If set to None will attempt
88 to create label from long_name and units.
89 title : str
90 Plot title.
91 projection : cartopy.crs object
92 Project to use on plot. See
93 https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
94 plot_buffer : float
95 Buffer to add around data on plot in lat and lon dimension.
96 img_tile : str
97 Image to use for the plot background. Set to None to not use
98 background image. For all image background types, see:
99 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
100 Default is None.
101 img_tile_args : dict
102 Keyword arguments for the chosen img_tile. These arguments can be
103 found for the corresponding img_tile here:
104 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
105 Default is an empty dictionary.
106 tile : int
107 Tile zoom to use with background image. Higher number indicates
108 more resolution. A value of 8 is typical for a normal sonde plot.
109 cartopy_feature : list of str or str
110 Cartopy feature to add to plot.
111 cmap : str
112 Color map to use for colorbar.
113 text : dictionary
114 Dictionary of {text:[lon,lat]} to add to plot. Can have more
115 than one set of text to add.
116 gridlines : boolean
117 Use latitude and longitude gridlines.
118 **kwargs : keyword arguments
119 Any other keyword arguments that will be passed
120 into :func:`matplotlib.pyplot.scatter` when the figure
121 is made. See the matplotlib documentation for further details
122 on what keyword arguments are available.
123
124 """
125 if dsname is None and len(self._ds.keys()) > 1:
126 raise ValueError(
127 'You must choose a datastream when there are 2 '
128 'or more datasets in the GeographicPlotDisplay '
129 'object.'
130 )
131 elif dsname is None:
132 dsname = list(self._ds.keys())[0]
133
134 if data_field is None:
135 raise ValueError('You must enter the name of the data ' 'to be plotted.')
136
137 if projection is None:
138 if CARTOPY_AVAILABLE:
139 projection = ccrs.PlateCarree()
140
141 # Extract data from the dataset
142 try:
143 lat = self._ds[dsname][lat_field].values
144 except KeyError:
145 raise ValueError(
146 (
147 'You will need to provide the name of the '
148 "field if not '{}' to use for latitude "
149 'data.'
150 ).format(lat_field)
151 )
152 try:
153 lon = self._ds[dsname][lon_field].values
154 except KeyError:
155 raise ValueError(
156 (
157 'You will need to provide the name of the '
158 "field if not '{}' to use for longitude "
159 'data.'
160 ).format(lon_field)
161 )
162
163 # Set up metadata information for display on plot
164 if cbar_label is None:
165 try:
166 cbar_label = (
167 self._ds[dsname][data_field].attrs['long_name']
168 + ' ('
169 + self._ds[dsname][data_field].attrs['units']
170 + ')'
171 )
172 except KeyError:
173 cbar_label = data_field
174
175 lat_limits = [np.nanmin(lat), np.nanmax(lat)]
176 lon_limits = [np.nanmin(lon), np.nanmax(lon)]
177 box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
178 bx_buf = box_size * plot_buffer
179
180 lat_center = np.sum(lat_limits) / 2.0
181 lon_center = np.sum(lon_limits) / 2.0
182
183 lat_limits = [
184 lat_center - box_size / 2.0 - bx_buf,
185 lat_center + box_size / 2.0 + bx_buf,
186 ]
187 lon_limits = [
188 lon_center - box_size / 2.0 - bx_buf,
189 lon_center + box_size / 2.0 + bx_buf,
190 ]
191
192 data = self._ds[dsname][data_field].values
193
194 # Create base plot projection
195 ax = plt.axes(projection=projection)
196 plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
197 ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
198
199 if title is None:
200 try:
201 dim = list(self._ds[dsname][data_field].dims)
202 ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
203 date = ts.strftime('%Y-%m-%d')
204 time_str = ts.strftime('%H:%M:%S')
205 plt.title(' '.join([dsname, 'at', date, time_str]))
206 except NameError:
207 plt.title(dsname)
208 else:
209 plt.title(title)
210
211 if img_tile is not None:
212 tiler = getattr(img_tiles, img_tile)(**img_tile_args)
213 ax.add_image(tiler, tile)
214
215 colorbar_map = None
216 if cmap is not None:
217 colorbar_map = matplotlib.colormaps.get_cmap(cmap)
218 sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
219 cbar = plt.colorbar(sc)
220 cbar.ax.set_ylabel(cbar_label)
221 if cartopy_feature is not None:
222 if isinstance(cartopy_feature, str):
223 cartopy_feature = [cartopy_feature]
224 cartopy_feature = [ii.upper() for ii in cartopy_feature]
225 if 'STATES' in cartopy_feature:
226 ax.add_feature(cfeature.STATES.with_scale('10m'))
227 if 'LAND' in cartopy_feature:
228 ax.add_feature(cfeature.LAND)
229 if 'OCEAN' in cartopy_feature:
230 ax.add_feature(cfeature.OCEAN)
231 if 'COASTLINE' in cartopy_feature:
232 ax.add_feature(cfeature.COASTLINE)
233 if 'BORDERS' in cartopy_feature:
234 ax.add_feature(cfeature.BORDERS, linestyle=':')
235 if 'LAKES' in cartopy_feature:
236 ax.add_feature(cfeature.LAKES, alpha=0.5)
237 if 'RIVERS' in cartopy_feature:
238 ax.add_feature(cfeature.RIVERS)
239 if text is not None:
240 for label, location in text.items():
241 ax.plot(location[0], location[1], marker='*', color='black')
242 ax.text(location[0], location[1], label, color='black')
243
244 if gridlines:
245 if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
246 gl = ax.gridlines(
247 crs=projection,
248 draw_labels=True,
249 linewidth=1,
250 color='gray',
251 alpha=0.5,
252 linestyle='--',
253 )
254 gl.top_labels = False
255 gl.left_labels = True
256 gl.bottom_labels = True
257 gl.right_labels = False
258 gl.xlabel_style = {'size': 6, 'color': 'gray'}
259 gl.ylabel_style = {'size': 6, 'color': 'gray'}
260 else:
261 # Labels are only currently supported for PlateCarree and Mercator
262 gl = ax.gridlines(
263 draw_labels=False,
264 linewidth=1,
265 color='gray',
266 alpha=0.5,
267 linestyle='--',
268 )
269
270 return ax
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py
--- a/act/plotting/geodisplay.py
+++ b/act/plotting/geodisplay.py
@@ -121,6 +121,11 @@
is made. See the matplotlib documentation for further details
on what keyword arguments are available.
+ Returns
+ -------
+ ax : matplotlib axis handle
+ The matplotlib axis handle of the plot.
+
"""
if dsname is None and len(self._ds.keys()) > 1:
raise ValueError(
| {"golden_diff": "diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py\n--- a/act/plotting/geodisplay.py\n+++ b/act/plotting/geodisplay.py\n@@ -121,6 +121,11 @@\n is made. See the matplotlib documentation for further details\n on what keyword arguments are available.\n \n+ Returns\n+ -------\n+ ax : matplotlib axis handle\n+ The matplotlib axis handle of the plot.\n+\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n", "issue": "GeographicPlotDisplay documentation missing description of return value\n* ACT version: 2.1.1\r\n* Python version: all\r\n* Operating System: all\r\n\r\n### Description\r\n\r\nThe GeographicPlotDisplay is missing a description of the returned matplotlib axes object. This proved to be a bit confusing on how to tell a student how to adjust the axes limits on the GeographicPlotDisplay, since doing so depends on calling the methods of the returned matplotlib axes object.\r\n\n", "before_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io import img_tiles\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n img_tile=None,\n img_tile_args={},\n tile=8,\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. 
See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n img_tile : str\n Image to use for the plot background. Set to None to not use\n background image. For all image background types, see:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is None.\n img_tile_args : dict\n Keyword arguments for the chosen img_tile. These arguments can be\n found for the corresponding img_tile here:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is an empty dictionary.\n tile : int\n Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', 
date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if img_tile is not None:\n tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}], "after_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io import img_tiles\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. 
More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n img_tile=None,\n img_tile_args={},\n tile=8,\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n img_tile : str\n Image to use for the plot background. Set to None to not use\n background image. For all image background types, see:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is None.\n img_tile_args : dict\n Keyword arguments for the chosen img_tile. These arguments can be\n found for the corresponding img_tile here:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is an empty dictionary.\n tile : int\n Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. 
See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n Returns\n -------\n ax : matplotlib axis handle\n The matplotlib axis handle of the plot.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if img_tile is not None:\n tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, 
location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}]} | 3,274 | 133 |
gh_patches_debug_3767 | rasdani/github-patches | git_diff | kartoza__prj.app-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More menu navigation cleanups
<img width="834" alt="screen shot 2016-06-22 at 00 00 38" src="https://cloud.githubusercontent.com/assets/178003/16247917/6d784382-380c-11e6-91f6-29423d263b8c.png">
- [ ] Make project icon and name clickable - click should take you to e.g. /en/qgis/
- [ ] Changelogs -> Changelog
- [ ] Remove version entries item
- [ ] Remove version thumbnails item
- [ ] Remove add entry item
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/base/templatetags/custom_markup.py`
Content:
```
1 import markdown
2 from django import template
3 from django.template.defaultfilters import stringfilter
4 from django.utils.encoding import force_unicode
5 from django.utils.safestring import mark_safe
6
7 register = template.Library()
8
9
10 @register.filter(name='base_markdown', is_safe=True)
11 @stringfilter
12 def base_markdown(value):
13 extensions = ["nl2br", ]
14
15 return mark_safe(markdown.markdown(force_unicode(value),
16 extensions,
17 safe_mode=True,
18 enable_attributes=False))
19
20
21 @register.filter(name='is_gif', is_safe=True)
22 @stringfilter
23 def is_gif(value):
24 return value[-4:] == '.gif'
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py
--- a/django_project/base/templatetags/custom_markup.py
+++ b/django_project/base/templatetags/custom_markup.py
@@ -22,3 +22,17 @@
@stringfilter
def is_gif(value):
return value[-4:] == '.gif'
+
+
+@register.inclusion_tag('button_span.html', takes_context=True)
+def show_button_icon(context, value):
+
+ context_icon = {
+ 'add': 'glyphicon glyphicon-asterisk',
+ 'update': 'glyphicon glyphicon-pencil',
+ 'delete': 'glyphicon glyphicon-minus'
+ }
+
+ return {
+ 'button_icon': context_icon[value]
+ }
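The patch registers an inclusion tag that renders a `button_span.html` template, which is not shown in the diff. A rough sketch of how the tag might be exercised, assuming a configured Django project where that template exists and emits something like `<span class="{{ button_icon }}"></span>`:

```python
# Illustrative only: requires Django settings to be configured and a
# 'button_span.html' template (contents assumed, e.g. a <span> that uses
# {{ button_icon }}) to be discoverable by the template loaders.
from django.template import engines

django_engine = engines["django"]
template = django_engine.from_string(
    "{% load custom_markup %}{% show_button_icon 'delete' %}"
)

# For 'delete' the tag supplies button_icon == 'glyphicon glyphicon-minus'
# to button_span.html before it is rendered.
print(template.render({}))
```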
| {"golden_diff": "diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py\n--- a/django_project/base/templatetags/custom_markup.py\n+++ b/django_project/base/templatetags/custom_markup.py\n@@ -22,3 +22,17 @@\n @stringfilter\n def is_gif(value):\n return value[-4:] == '.gif'\n+\n+\[email protected]_tag('button_span.html', takes_context=True)\n+def show_button_icon(context, value):\n+\n+ context_icon = {\n+ 'add': 'glyphicon glyphicon-asterisk',\n+ 'update': 'glyphicon glyphicon-pencil',\n+ 'delete': 'glyphicon glyphicon-minus'\n+ }\n+\n+ return {\n+ 'button_icon': context_icon[value]\n+ }\n", "issue": "More menu navigation cleanups\n<img width=\"834\" alt=\"screen shot 2016-06-22 at 00 00 38\" src=\"https://cloud.githubusercontent.com/assets/178003/16247917/6d784382-380c-11e6-91f6-29423d263b8c.png\">\n- [ ] Make project icon and name clickable - click should take you to e.g. /en/qgis/\n- [ ] Changelogs -> Changelog\n- [ ] Remove version entries item\n- [ ] Remove version thumbnails item\n- [ ] Remove add entry item\n\n", "before_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n\n\[email protected](name='is_gif', is_safe=True)\n@stringfilter\ndef is_gif(value):\n return value[-4:] == '.gif'\n", "path": "django_project/base/templatetags/custom_markup.py"}], "after_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n\n\[email protected](name='is_gif', is_safe=True)\n@stringfilter\ndef is_gif(value):\n return value[-4:] == '.gif'\n\n\[email protected]_tag('button_span.html', takes_context=True)\ndef show_button_icon(context, value):\n\n context_icon = {\n 'add': 'glyphicon glyphicon-asterisk',\n 'update': 'glyphicon glyphicon-pencil',\n 'delete': 'glyphicon glyphicon-minus'\n }\n\n return {\n 'button_icon': context_icon[value]\n }\n", "path": "django_project/base/templatetags/custom_markup.py"}]} | 601 | 179 |
gh_patches_debug_37925 | rasdani/github-patches | git_diff | pulp__pulpcore-4563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sub-optimal performance during exports due to N+1 queries and complex queries
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
--- END ISSUE ---
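Although the issue body is an unfilled template, the title names the classic Django N+1 access pattern. Below is a generic sketch, not pulpcore's actual export code; the model names are borrowed from the files that follow and `content_pks` is a placeholder, so it would only run inside a configured pulpcore environment:

```python
# Generic ORM sketch of the N+1 pattern named in the title; illustrative only.
from pulpcore.app.models.content import ContentArtifact

content_pks = []  # placeholder: the PKs of the content being exported

# N+1: one query for the ContentArtifacts, then one more per row as soon as
# the related Artifact is touched.
for ca in ContentArtifact.objects.filter(content__in=content_pks):
    checksum = ca.artifact.sha256  # extra query on every iteration

# Joined up front: the related Artifact rows arrive with the same query.
joined = ContentArtifact.objects.filter(content__in=content_pks).select_related("artifact")
for ca in joined:
    checksum = ca.artifact.sha256  # no additional queries
```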
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/importer.py`
Content:
```
1 from django.http import Http404
2 from django_filters.rest_framework import filters
3 from drf_spectacular.utils import extend_schema
4 from rest_framework import mixins
5
6 from pulpcore.app.models import (
7 Import,
8 Importer,
9 PulpImport,
10 PulpImporter,
11 TaskGroup,
12 )
13 from pulpcore.app.response import TaskGroupOperationResponse
14 from pulpcore.app.serializers import (
15 ImportSerializer,
16 ImporterSerializer,
17 PulpImporterSerializer,
18 PulpImportSerializer,
19 TaskGroupOperationResponseSerializer,
20 )
21 from pulpcore.app.tasks import pulp_import
22 from pulpcore.app.viewsets import (
23 BaseFilterSet,
24 NamedModelViewSet,
25 )
26 from pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS
27 from pulpcore.tasking.tasks import dispatch
28
29
30 class ImporterFilter(BaseFilterSet):
31 """Filter for Importers."""
32
33 name = filters.CharFilter()
34
35 class Meta:
36 model = Importer
37 fields = {
38 "name": NAME_FILTER_OPTIONS,
39 }
40
41
42 class ImporterViewSet(
43 NamedModelViewSet,
44 mixins.CreateModelMixin,
45 mixins.UpdateModelMixin,
46 mixins.RetrieveModelMixin,
47 mixins.ListModelMixin,
48 mixins.DestroyModelMixin,
49 ):
50 """ViewSet for Importers."""
51
52 queryset = Importer.objects.all()
53 serializer_class = ImporterSerializer
54 endpoint_name = "importers"
55 router_lookup = "importer"
56 filterset_class = ImporterFilter
57
58
59 class ImportViewSet(
60 NamedModelViewSet,
61 mixins.CreateModelMixin,
62 mixins.RetrieveModelMixin,
63 mixins.ListModelMixin,
64 mixins.DestroyModelMixin,
65 ):
66 """ViewSet for viewing imports from an Importer."""
67
68 endpoint_name = "imports"
69 nest_prefix = "importers"
70 router_lookup = "import"
71 lookup_field = "pk"
72 parent_viewset = ImporterViewSet
73 parent_lookup_kwargs = {"importer_pk": "importer__pk"}
74 serializer_class = ImportSerializer
75 queryset = Import.objects.all()
76
77
78 class PulpImporterViewSet(ImporterViewSet):
79 """ViewSet for PulpImporters."""
80
81 endpoint_name = "pulp"
82 serializer_class = PulpImporterSerializer
83 queryset = PulpImporter.objects.all()
84
85
86 class PulpImportViewSet(ImportViewSet):
87 """ViewSet for PulpImports."""
88
89 parent_viewset = PulpImporterViewSet
90 queryset = PulpImport.objects.all()
91
92 @extend_schema(
93 request=PulpImportSerializer,
94 description="Trigger an asynchronous task to import a Pulp export.",
95 responses={202: TaskGroupOperationResponseSerializer},
96 )
97 def create(self, request, importer_pk):
98 """Import a Pulp export into Pulp."""
99 try:
100 importer = PulpImporter.objects.get(pk=importer_pk)
101 except PulpImporter.DoesNotExist:
102 raise Http404
103
104 serializer = PulpImportSerializer(data=request.data, context={"request": request})
105 serializer.is_valid(raise_exception=True)
106
107 path = serializer.validated_data.get("path")
108 toc = serializer.validated_data.get("toc")
109 task_group = TaskGroup.objects.create(description=f"Import of {path}")
110
111 dispatch(
112 pulp_import,
113 exclusive_resources=[importer],
114 task_group=task_group,
115 kwargs={"importer_pk": importer.pk, "path": path, "toc": toc},
116 )
117 return TaskGroupOperationResponse(task_group, request)
118
```
Path: `pulpcore/app/modelresource.py`
Content:
```
1 from import_export import fields
2 from import_export.widgets import ForeignKeyWidget
3 from logging import getLogger
4
5 from pulpcore.app.models.content import (
6 Artifact,
7 Content,
8 ContentArtifact,
9 )
10 from pulpcore.app.models.repository import Repository
11 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
12 from pulpcore.plugin.importexport import QueryModelResource
13
14
15 log = getLogger(__name__)
16
17
18 #
19 # Artifact and Repository are different from other import-export entities, in that they are not
20 # repo-version-specific.
21 #
22 class ArtifactResource(QueryModelResource):
23 """Resource for import/export of artifacts."""
24
25 def before_import_row(self, row, **kwargs):
26 """
27 Sets digests to None if they are blank strings.
28
29 Args:
30 row (tablib.Dataset row): incoming import-row representing a single Variant.
31 kwargs: args passed along from the import() call.
32
33 """
34 # the export converts None to blank strings but sha384 and sha512 have unique constraints
35 # that get triggered if they are blank. convert checksums back into None if they are blank.
36 for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:
37 if row[checksum] == "":
38 row[checksum] = None
39
40 class Meta:
41 model = Artifact
42 exclude = (
43 "pulp_id",
44 "pulp_created",
45 "pulp_last_updated",
46 )
47 import_id_fields = ("sha256",)
48
49
50 class RepositoryResource(QueryModelResource):
51 class Meta:
52 model = Repository
53 import_id_fields = ("name",)
54 exclude = (
55 "pulp_id",
56 "pulp_created",
57 "pulp_last_updated",
58 "content",
59 )
60
61
62 class ContentArtifactResource(QueryModelResource):
63 """
64 Handles import/export of the ContentArtifact model.
65
66 ContentArtifact is different from other import-export entities because it has no 'natural key'
67 other than a pulp_id, which aren't shared across instances. We do some magic to link up
68 ContentArtifacts to their matching (already-imported) Content.
69
70 Some plugin-models have sub-repositories. We take advantage of the content-mapping
71 machinery to account for those contentartifacts as well.
72 """
73
74 artifact = fields.Field(
75 column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
76 )
77
78 def __init__(self, repo_version=None, content_mapping=None):
79 self.content_mapping = content_mapping
80 super().__init__(repo_version)
81
82 def before_import_row(self, row, **kwargs):
83 """
84 Fixes the content-ptr of an incoming content-artifact row at import time.
85
86 Finds the 'original uuid' of the Content for this row, looks it up as the
87 'upstream_id' of imported Content, and then replaces the Content-pk with its
88 (new) uuid.
89
90 Args:
91 row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.
92 kwargs: args passed along from the import() call.
93
94 Returns:
95 (tablib.Dataset row): row that now points to the new downstream uuid for its content.
96 """
97
98 linked_content = Content.objects.get(upstream_id=row["content"])
99 row["content"] = str(linked_content.pulp_id)
100
101 def set_up_queryset(self):
102 vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
103 if self.content_mapping:
104 all_content = []
105 for content_ids in self.content_mapping.values():
106 all_content.extend(content_ids)
107 vers_content = vers_content.union(
108 ContentArtifact.objects.filter(content__in=all_content)
109 )
110 return vers_content.order_by("content", "relative_path")
111
112 class Meta:
113 model = ContentArtifact
114 import_id_fields = (
115 "content",
116 "relative_path",
117 )
118 exclude = (
119 "pulp_created",
120 "pulp_last_updated",
121 "_artifacts",
122 "pulp_id",
123 )
124
```
Path: `pulpcore/plugin/importexport.py`
Content:
```
1 from import_export import resources
2
3
4 class QueryModelResource(resources.ModelResource):
5 """
6 A ModelResource that knows the RepositoryVersion to use to filter its query
7
8 QueryModelResource has-a repository-version that can be used to limit its export, and a
9 queryset that is derived from that repository-version.
10
11 A plugin-writer will subclass their ModelResources from QueryModelResource,
12 and use it to define the limiting query
13
14 Attributes:
15
16 repo_version (models.RepositoryVersion): The RepositoryVersion whose content we would like
17 to export
18 queryset (django.db.models.query.QuerySet): filtering queryset for this resource
19 (driven by repo_version)
20 """
21
22 def set_up_queryset(self):
23 return None
24
25 def __init__(self, repo_version=None):
26 self.repo_version = repo_version
27 if repo_version:
28 self.queryset = self.set_up_queryset()
29
30 class Meta:
31 exclude = ("pulp_id", "pulp_created", "pulp_last_updated")
32
33
34 class BaseContentResource(QueryModelResource):
35 """
36 A QueryModelResource that knows how to fill in the 'upstream_id' export-field
37
38 BaseContentResource knows to de/hydrate upstream_id with the content-being-exported's pulp_id.
39
40 All Content-based resources being import/exported should subclass from this class.
41 """
42
43 # An optional mapping that maps Content to Repositories. Useful when Content is sometimes not
44 # tied directly to a Repository but rather to a subrepo. Formatting:
45 #
46 # {"<repo name>": ["<content upstream_id>", "..."]}
47 #
48 content_mapping = None
49
50 class Meta:
51 exclude = QueryModelResource.Meta.exclude + ("_artifacts", "content", "content_ptr")
52
53 def dehydrate_upstream_id(self, content):
54 return str(content.pulp_id)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -43,6 +43,7 @@
"pulp_id",
"pulp_created",
"pulp_last_updated",
+ "timestamp_of_interest",
)
import_id_fields = ("sha256",)
@@ -99,15 +100,20 @@
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ content_pks = set(self.repo_version.content.values_list("pk", flat=True))
+
if self.content_mapping:
- all_content = []
for content_ids in self.content_mapping.values():
- all_content.extend(content_ids)
- vers_content = vers_content.union(
- ContentArtifact.objects.filter(content__in=all_content)
- )
- return vers_content.order_by("content", "relative_path")
+ content_pks |= set(content_ids)
+
+ return (
+ ContentArtifact.objects.filter(content__in=content_pks)
+ .order_by("content", "relative_path")
+ .select_related("artifact")
+ )
+
+ def dehydrate_content(self, content_artifact):
+ return str(content_artifact.content_id)
class Meta:
model = ContentArtifact
diff --git a/pulpcore/app/viewsets/importer.py b/pulpcore/app/viewsets/importer.py
--- a/pulpcore/app/viewsets/importer.py
+++ b/pulpcore/app/viewsets/importer.py
@@ -106,7 +106,9 @@
path = serializer.validated_data.get("path")
toc = serializer.validated_data.get("toc")
- task_group = TaskGroup.objects.create(description=f"Import of {path}")
+ task_group = TaskGroup.objects.create(
+ description="Import of {path}".format(path=path or toc)
+ )
dispatch(
pulp_import,
diff --git a/pulpcore/plugin/importexport.py b/pulpcore/plugin/importexport.py
--- a/pulpcore/plugin/importexport.py
+++ b/pulpcore/plugin/importexport.py
@@ -22,6 +22,9 @@
def set_up_queryset(self):
return None
+ def dehydrate_pulp_domain(self, content):
+ return str(content.pulp_domain_id)
+
def __init__(self, repo_version=None):
self.repo_version = repo_version
if repo_version:
@@ -47,8 +50,13 @@
#
content_mapping = None
- class Meta:
- exclude = QueryModelResource.Meta.exclude + ("_artifacts", "content", "content_ptr")
-
def dehydrate_upstream_id(self, content):
return str(content.pulp_id)
+
+ class Meta:
+ exclude = QueryModelResource.Meta.exclude + (
+ "_artifacts",
+ "content",
+ "content_ptr",
+ "timestamp_of_interest",
+ )
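Why the patch helps, as inferred from the diff rather than stated anywhere in the row: `set_up_queryset` now works from plain primary keys with `select_related("artifact")`, avoiding a per-row artifact lookup and the earlier UNION of querysets, and the new `dehydrate_content` reads the foreign-key column instead of loading each related Content object. A small sketch of that last shortcut, assuming a configured pulpcore environment with at least one ContentArtifact row:

```python
from pulpcore.app.models.content import ContentArtifact

ca = ContentArtifact.objects.first()  # placeholder row for illustration

# Touching .content loads the related Content object just to read its PK:
# one extra query per ContentArtifact.
upstream = str(ca.content.pulp_id)

# Reading the FK column already stored on the row needs no extra query,
# which is what dehydrate_content() in the patch relies on.
upstream = str(ca.content_id)
```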
| {"golden_diff": "diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py\n--- a/pulpcore/app/modelresource.py\n+++ b/pulpcore/app/modelresource.py\n@@ -43,6 +43,7 @@\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n+ \"timestamp_of_interest\",\n )\n import_id_fields = (\"sha256\",)\n \n@@ -99,15 +100,20 @@\n row[\"content\"] = str(linked_content.pulp_id)\n \n def set_up_queryset(self):\n- vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n+ content_pks = set(self.repo_version.content.values_list(\"pk\", flat=True))\n+\n if self.content_mapping:\n- all_content = []\n for content_ids in self.content_mapping.values():\n- all_content.extend(content_ids)\n- vers_content = vers_content.union(\n- ContentArtifact.objects.filter(content__in=all_content)\n- )\n- return vers_content.order_by(\"content\", \"relative_path\")\n+ content_pks |= set(content_ids)\n+\n+ return (\n+ ContentArtifact.objects.filter(content__in=content_pks)\n+ .order_by(\"content\", \"relative_path\")\n+ .select_related(\"artifact\")\n+ )\n+\n+ def dehydrate_content(self, content_artifact):\n+ return str(content_artifact.content_id)\n \n class Meta:\n model = ContentArtifact\ndiff --git a/pulpcore/app/viewsets/importer.py b/pulpcore/app/viewsets/importer.py\n--- a/pulpcore/app/viewsets/importer.py\n+++ b/pulpcore/app/viewsets/importer.py\n@@ -106,7 +106,9 @@\n \n path = serializer.validated_data.get(\"path\")\n toc = serializer.validated_data.get(\"toc\")\n- task_group = TaskGroup.objects.create(description=f\"Import of {path}\")\n+ task_group = TaskGroup.objects.create(\n+ description=\"Import of {path}\".format(path=path or toc)\n+ )\n \n dispatch(\n pulp_import,\ndiff --git a/pulpcore/plugin/importexport.py b/pulpcore/plugin/importexport.py\n--- a/pulpcore/plugin/importexport.py\n+++ b/pulpcore/plugin/importexport.py\n@@ -22,6 +22,9 @@\n def set_up_queryset(self):\n return None\n \n+ def dehydrate_pulp_domain(self, content):\n+ return str(content.pulp_domain_id)\n+\n def __init__(self, repo_version=None):\n self.repo_version = repo_version\n if repo_version:\n@@ -47,8 +50,13 @@\n #\n content_mapping = None\n \n- class Meta:\n- exclude = QueryModelResource.Meta.exclude + (\"_artifacts\", \"content\", \"content_ptr\")\n-\n def dehydrate_upstream_id(self, content):\n return str(content.pulp_id)\n+\n+ class Meta:\n+ exclude = QueryModelResource.Meta.exclude + (\n+ \"_artifacts\",\n+ \"content\",\n+ \"content_ptr\",\n+ \"timestamp_of_interest\",\n+ )\n", "issue": "Sub-optimal performance during exports due to N+1 queries and complex quries\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. 
Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from django.http import Http404\nfrom django_filters.rest_framework import filters\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\n\nfrom pulpcore.app.models import (\n Import,\n Importer,\n PulpImport,\n PulpImporter,\n TaskGroup,\n)\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.serializers import (\n ImportSerializer,\n ImporterSerializer,\n PulpImporterSerializer,\n PulpImportSerializer,\n TaskGroupOperationResponseSerializer,\n)\nfrom pulpcore.app.tasks import pulp_import\nfrom pulpcore.app.viewsets import (\n BaseFilterSet,\n NamedModelViewSet,\n)\nfrom pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ImporterFilter(BaseFilterSet):\n \"\"\"Filter for Importers.\"\"\"\n\n name = filters.CharFilter()\n\n class Meta:\n model = Importer\n fields = {\n \"name\": NAME_FILTER_OPTIONS,\n }\n\n\nclass ImporterViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"ViewSet for Importers.\"\"\"\n\n queryset = Importer.objects.all()\n serializer_class = ImporterSerializer\n endpoint_name = \"importers\"\n router_lookup = \"importer\"\n filterset_class = ImporterFilter\n\n\nclass ImportViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"ViewSet for viewing imports from an Importer.\"\"\"\n\n endpoint_name = \"imports\"\n nest_prefix = \"importers\"\n router_lookup = \"import\"\n lookup_field = \"pk\"\n parent_viewset = ImporterViewSet\n parent_lookup_kwargs = {\"importer_pk\": \"importer__pk\"}\n serializer_class = ImportSerializer\n queryset = Import.objects.all()\n\n\nclass PulpImporterViewSet(ImporterViewSet):\n \"\"\"ViewSet for PulpImporters.\"\"\"\n\n endpoint_name = \"pulp\"\n serializer_class = PulpImporterSerializer\n queryset = PulpImporter.objects.all()\n\n\nclass PulpImportViewSet(ImportViewSet):\n \"\"\"ViewSet for PulpImports.\"\"\"\n\n parent_viewset = PulpImporterViewSet\n queryset = PulpImport.objects.all()\n\n @extend_schema(\n request=PulpImportSerializer,\n description=\"Trigger an asynchronous task to import a Pulp export.\",\n responses={202: TaskGroupOperationResponseSerializer},\n )\n def create(self, request, importer_pk):\n \"\"\"Import a Pulp export into Pulp.\"\"\"\n try:\n importer = PulpImporter.objects.get(pk=importer_pk)\n except PulpImporter.DoesNotExist:\n raise Http404\n\n serializer = PulpImportSerializer(data=request.data, context={\"request\": request})\n serializer.is_valid(raise_exception=True)\n\n path = serializer.validated_data.get(\"path\")\n toc = serializer.validated_data.get(\"toc\")\n task_group = TaskGroup.objects.create(description=f\"Import of {path}\")\n\n dispatch(\n pulp_import,\n exclusive_resources=[importer],\n task_group=task_group,\n kwargs={\"importer_pk\": importer.pk, \"path\": path, \"toc\": toc},\n )\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/importer.py"}, {"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import 
Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered if they are blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n\n Some plugin-models have sub-repositories. We take advantage of the content-mapping\n machinery to account for those contentartifacts as well.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def __init__(self, repo_version=None, content_mapping=None):\n self.content_mapping = content_mapping\n super().__init__(repo_version)\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n if self.content_mapping:\n all_content = []\n for content_ids in self.content_mapping.values():\n all_content.extend(content_ids)\n vers_content = vers_content.union(\n ContentArtifact.objects.filter(content__in=all_content)\n )\n return vers_content.order_by(\"content\", \"relative_path\")\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}, {"content": "from import_export import resources\n\n\nclass QueryModelResource(resources.ModelResource):\n \"\"\"\n 
A ModelResource that knows the RepositoryVersion to use to filter its query\n\n QueryModelResource has-a repository-version that can be used to limit its export, and a\n queryset that is derived from that repository-version.\n\n A plugin-writer will subclass their ModelResources from QueryModelResource,\n and use it to define the limiting query\n\n Attributes:\n\n repo_version (models.RepositoryVersion): The RepositoryVersion whose content we would like\n to export\n queryset (django.db.models.query.QuerySet): filtering queryset for this resource\n (driven by repo_version)\n \"\"\"\n\n def set_up_queryset(self):\n return None\n\n def __init__(self, repo_version=None):\n self.repo_version = repo_version\n if repo_version:\n self.queryset = self.set_up_queryset()\n\n class Meta:\n exclude = (\"pulp_id\", \"pulp_created\", \"pulp_last_updated\")\n\n\nclass BaseContentResource(QueryModelResource):\n \"\"\"\n A QueryModelResource that knows how to fill in the 'upstream_id' export-field\n\n BaseContentResource knows to de/hydrate upstream_id with the content-being-exported's pulp_id.\n\n All Content-based resources being import/exported should subclass from this class.\n \"\"\"\n\n # An optional mapping that maps Content to Repositories. Useful when Content is sometimes not\n # tied directly to a Repository but rather to a subrepo. Formatting:\n #\n # {\"<repo name>\": [\"<content upstream_id>\", \"...\"]}\n #\n content_mapping = None\n\n class Meta:\n exclude = QueryModelResource.Meta.exclude + (\"_artifacts\", \"content\", \"content_ptr\")\n\n def dehydrate_upstream_id(self, content):\n return str(content.pulp_id)\n", "path": "pulpcore/plugin/importexport.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django_filters.rest_framework import filters\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\n\nfrom pulpcore.app.models import (\n Import,\n Importer,\n PulpImport,\n PulpImporter,\n TaskGroup,\n)\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.serializers import (\n ImportSerializer,\n ImporterSerializer,\n PulpImporterSerializer,\n PulpImportSerializer,\n TaskGroupOperationResponseSerializer,\n)\nfrom pulpcore.app.tasks import pulp_import\nfrom pulpcore.app.viewsets import (\n BaseFilterSet,\n NamedModelViewSet,\n)\nfrom pulpcore.app.viewsets.base import NAME_FILTER_OPTIONS\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ImporterFilter(BaseFilterSet):\n \"\"\"Filter for Importers.\"\"\"\n\n name = filters.CharFilter()\n\n class Meta:\n model = Importer\n fields = {\n \"name\": NAME_FILTER_OPTIONS,\n }\n\n\nclass ImporterViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"ViewSet for Importers.\"\"\"\n\n queryset = Importer.objects.all()\n serializer_class = ImporterSerializer\n endpoint_name = \"importers\"\n router_lookup = \"importer\"\n filterset_class = ImporterFilter\n\n\nclass ImportViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n):\n \"\"\"ViewSet for viewing imports from an Importer.\"\"\"\n\n endpoint_name = \"imports\"\n nest_prefix = \"importers\"\n router_lookup = \"import\"\n lookup_field = \"pk\"\n parent_viewset = ImporterViewSet\n parent_lookup_kwargs = {\"importer_pk\": \"importer__pk\"}\n serializer_class = ImportSerializer\n queryset = 
Import.objects.all()\n\n\nclass PulpImporterViewSet(ImporterViewSet):\n \"\"\"ViewSet for PulpImporters.\"\"\"\n\n endpoint_name = \"pulp\"\n serializer_class = PulpImporterSerializer\n queryset = PulpImporter.objects.all()\n\n\nclass PulpImportViewSet(ImportViewSet):\n \"\"\"ViewSet for PulpImports.\"\"\"\n\n parent_viewset = PulpImporterViewSet\n queryset = PulpImport.objects.all()\n\n @extend_schema(\n request=PulpImportSerializer,\n description=\"Trigger an asynchronous task to import a Pulp export.\",\n responses={202: TaskGroupOperationResponseSerializer},\n )\n def create(self, request, importer_pk):\n \"\"\"Import a Pulp export into Pulp.\"\"\"\n try:\n importer = PulpImporter.objects.get(pk=importer_pk)\n except PulpImporter.DoesNotExist:\n raise Http404\n\n serializer = PulpImportSerializer(data=request.data, context={\"request\": request})\n serializer.is_valid(raise_exception=True)\n\n path = serializer.validated_data.get(\"path\")\n toc = serializer.validated_data.get(\"toc\")\n task_group = TaskGroup.objects.create(\n description=\"Import of {path}\".format(path=path or toc)\n )\n\n dispatch(\n pulp_import,\n exclusive_resources=[importer],\n task_group=task_group,\n kwargs={\"importer_pk\": importer.pk, \"path\": path, \"toc\": toc},\n )\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/importer.py"}, {"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered if they are blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"timestamp_of_interest\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n\n Some plugin-models have sub-repositories. 
We take advantage of the content-mapping\n machinery to account for those contentartifacts as well.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def __init__(self, repo_version=None, content_mapping=None):\n self.content_mapping = content_mapping\n super().__init__(repo_version)\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n content_pks = set(self.repo_version.content.values_list(\"pk\", flat=True))\n\n if self.content_mapping:\n for content_ids in self.content_mapping.values():\n content_pks |= set(content_ids)\n\n return (\n ContentArtifact.objects.filter(content__in=content_pks)\n .order_by(\"content\", \"relative_path\")\n .select_related(\"artifact\")\n )\n\n def dehydrate_content(self, content_artifact):\n return str(content_artifact.content_id)\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}, {"content": "from import_export import resources\n\n\nclass QueryModelResource(resources.ModelResource):\n \"\"\"\n A ModelResource that knows the RepositoryVersion to use to filter its query\n\n QueryModelResource has-a repository-version that can be used to limit its export, and a\n queryset that is derived from that repository-version.\n\n A plugin-writer will subclass their ModelResources from QueryModelResource,\n and use it to define the limiting query\n\n Attributes:\n\n repo_version (models.RepositoryVersion): The RepositoryVersion whose content we would like\n to export\n queryset (django.db.models.query.QuerySet): filtering queryset for this resource\n (driven by repo_version)\n \"\"\"\n\n def set_up_queryset(self):\n return None\n\n def dehydrate_pulp_domain(self, content):\n return str(content.pulp_domain_id)\n\n def __init__(self, repo_version=None):\n self.repo_version = repo_version\n if repo_version:\n self.queryset = self.set_up_queryset()\n\n class Meta:\n exclude = (\"pulp_id\", \"pulp_created\", \"pulp_last_updated\")\n\n\nclass BaseContentResource(QueryModelResource):\n \"\"\"\n A QueryModelResource that knows how to fill in the 'upstream_id' export-field\n\n BaseContentResource knows to de/hydrate upstream_id with the content-being-exported's pulp_id.\n\n All Content-based resources being import/exported should subclass from this class.\n \"\"\"\n\n # An optional mapping that maps Content to Repositories. Useful when Content is sometimes not\n # tied directly to a Repository but rather to a subrepo. 
Formatting:\n #\n # {\"<repo name>\": [\"<content upstream_id>\", \"...\"]}\n #\n content_mapping = None\n\n def dehydrate_upstream_id(self, content):\n return str(content.pulp_id)\n\n class Meta:\n exclude = QueryModelResource.Meta.exclude + (\n \"_artifacts\",\n \"content\",\n \"content_ptr\",\n \"timestamp_of_interest\",\n )\n", "path": "pulpcore/plugin/importexport.py"}]} | 3,008 | 681 |
gh_patches_debug_35970 | rasdani/github-patches | git_diff | horovod__horovod-608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytorch + horovod 0.15.1 distributed optimizer not working anymore
I just upgraded horovod 0.15.0 -> 0.15.1 on a ubuntu image `4.4.0-137-generic #163-Ubuntu SMP Mon Sep 24 13:14:43 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux`. When using the DistributedOptimizer from horovod.torch I now encounter the error
```
Traceback (most recent call last):
File "train.py", line 641, in <module>
train_images(hps)
File "train.py", line 444, in train_images
train_step(batch, batch_idx, epoch, hps, model, opt, train_logger)
File "train.py", line 457, in train_step
opt.step()
File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 97, in step
return super(self.__class__, self).step(closure)
File "/opt/conda/lib/python3.6/site-packages/torch/optim/adamax.py", line 75, in step
exp_avg.mul_(beta1).add_(1 - beta1, grad)
TypeError: mul_() received an invalid combination of arguments - got (numpy.float32), but expected one of:
* (Tensor other)
didn't match because some of the arguments have invalid types: (numpy.float32)
* (float other)
didn't match because some of the arguments have invalid types: (numpy.float32)
```
Downgrading to 0.15.0 fixes the issue. The behavior is independent of CPU, GPU or MultipleGPU training.
--- END ISSUE ---
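One way the `betas` end up as NumPy scalars is the option round-trip performed by `broadcast_optimizer_state` in the file below. A minimal sketch, not taken from the original report and with illustrative names, assuming a tuple-valued option such as Adam/Adamax `betas`:

```python
# Sketch of the cast done for optimizer options in horovod 0.15.1:
# dtype = type(option_value); dtype(option_tensor.numpy()[0])
import torch

betas = (0.9, 0.999)                     # original Python floats
option_tensor = torch.Tensor([betas])    # wrapped for broadcasting, shape (1, 2)

restored = type(betas)(option_tensor.numpy()[0])

print(type(restored[0]))  # <class 'numpy.float32'>, no longer a Python float
# Older PyTorch in-place ops such as exp_avg.mul_(beta1) rejected NumPy scalars,
# which matches the TypeError raised inside adamax.step() above.
```

Scalar options like `lr` survive because `float(numpy.float32(...))` is a plain Python float; only container options such as `betas` keep NumPy element types after this cast.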
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/torch/__init__.py`
Content:
```
1 # Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 from horovod.common import check_extension
21
22 try:
23 check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',
24 __file__, 'mpi_lib_v2')
25 except:
26 check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',
27 __file__, 'mpi_lib', '_mpi_lib')
28
29 from horovod.torch.compression import Compression
30 from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
31 from horovod.torch.mpi_ops import allgather, allgather_async
32 from horovod.torch.mpi_ops import broadcast, broadcast_async, broadcast_, broadcast_async_
33 from horovod.torch.mpi_ops import poll, synchronize
34 from horovod.torch.mpi_ops import init, shutdown
35 from horovod.torch.mpi_ops import size, local_size, rank, local_rank
36 from horovod.torch.mpi_ops import mpi_threads_supported
37
38 import torch
39 import collections
40
41
42 class _DistributedOptimizer(torch.optim.Optimizer):
43 def __init__(self, params, named_parameters, compression):
44 super(self.__class__, self).__init__(params)
45 self._compression = compression
46
47 if named_parameters is not None:
48 named_parameters = list(named_parameters)
49 else:
50 named_parameters = []
51
52 # make sure that named_parameters are tuples
53 if any([not isinstance(p, tuple) for p in named_parameters]):
54 raise ValueError('named_parameters should be a sequence of '
55 'tuples (name, parameter), usually produced by '
56 'model.named_parameters().')
57
58 if len(named_parameters) > 0:
59 self._parameter_names = {v: k for k, v
60 in sorted(named_parameters)}
61 else:
62 self._parameter_names = {v: 'allreduce.noname.%s' % i
63 for param_group in self.param_groups
64 for i, v in enumerate(param_group['params'])}
65
66 self._handles = {}
67 self._grad_accs = []
68 self._requires_update = set()
69 if size() > 1:
70 self._register_hooks()
71
72 def _register_hooks(self):
73 for param_group in self.param_groups:
74 for p in param_group['params']:
75 if p.requires_grad:
76 p.grad = p.data.new(p.size()).zero_()
77 self._requires_update.add(p)
78 p_tmp = p.expand_as(p)
79 grad_acc = p_tmp.grad_fn.next_functions[0][0]
80 grad_acc.register_hook(self._make_hook(p))
81 self._grad_accs.append(grad_acc)
82
83 def _allreduce_grad(self, p):
84 name = self._parameter_names.get(p)
85 tensor = p.grad.data
86 tensor_compressed, ctx = self._compression.compress(tensor)
87
88 handle = allreduce_async_(tensor_compressed, average=True, name=name)
89 return handle, ctx
90
91 def _make_hook(self, p):
92 def hook(*ignore):
93 assert p not in self._handles
94 assert not p.grad.requires_grad
95 handle, ctx = self._allreduce_grad(p)
96 self._handles[p] = (handle, ctx)
97 return hook
98
99 def synchronize(self):
100 missing_p = self._requires_update - set(self._handles.keys())
101 for p in missing_p:
102 self._allreduce_grad(p)
103
104 for p, value in self._handles.items():
105 handle, ctx = value
106 output = synchronize(handle)
107 p.grad.data.set_(self._compression.decompress(output, ctx))
108 self._handles.clear()
109
110 def step(self, closure=None):
111 self.synchronize()
112 return super(self.__class__, self).step(closure)
113
114
115 def DistributedOptimizer(optimizer, named_parameters=None, compression=Compression.none):
116 """
117 An optimizer that wraps another torch.optim.Optimizer, using an allreduce to
118 average gradient values before applying gradients to model weights.
119
120 Allreduce operations are executed after each gradient is computed by `loss.backward()`
121 in parallel with each other. The `step()` method ensures that all allreduce operations are
122 finished before applying gradients to the model.
123
124 DistributedOptimizer exposes the `synchronize()` method, which forces allreduce operations
125 to finish before continuing the execution. It's useful in conjunction with gradient
126 clipping, or other operations that modify gradients in place before `step()` is executed.
127
128 Example of gradient clipping:
129 ```
130 output = model(data)
131 loss = F.nll_loss(output, target)
132 loss.backward()
133 optimizer.synchronize()
134 torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
135 optimizer.step()
136 ```
137
138 Arguments:
139 optimizer: Optimizer to use for computing gradients and applying updates.
140 named_parameters: A mapping between parameter names and values. Used for naming of
141 allreduce operations. Typically just `model.named_parameters()`.
142 compression: Compression algorithm used during allreduce to reduce the amount
143 of data sent during the each parameter update step. Defaults to
144 not using compression.
145 """
146 # We dynamically create a new class that inherits from the optimizer that was passed in.
147 # The goal is to override the `step()` method with an allreduce implementation.
148 cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
149 dict(_DistributedOptimizer.__dict__))
150 return cls(optimizer.param_groups, named_parameters, compression)
151
152
153 def broadcast_parameters(params, root_rank):
154 """
155 Broadcasts the parameters from root rank to all other processes.
156 Typical usage is to broadcast the `model.state_dict()`,
157 `model.named_parameters()`, or `model.parameters()`.
158
159 Arguments:
160 params: One of the following:
161 - list of parameters to broadcast
162 - dict of parameters to broadcast
163 root_rank: The rank of the process from which parameters will be
164 broadcasted to all other processes.
165 """
166 if isinstance(params, dict):
167 params = sorted(params.items())
168 elif isinstance(params, list):
169 # support both named_parameters() and regular parameters()
170 params = [p if isinstance(p, tuple) else (None, p) for p in params]
171 else:
172 raise ValueError('invalid params of type: %s' % type(params))
173
174 # Run asynchronous broadcasts.
175 handles = []
176 for name, p in params:
177 handle = broadcast_async_(p, root_rank, name)
178 handles.append(handle)
179
180 # Wait for completion.
181 for handle in handles:
182 synchronize(handle)
183
184
185 def broadcast_optimizer_state(optimizer, root_rank):
186 """
187 Broadcasts an optimizer state from root rank to all other processes.
188
189 Arguments:
190 optimizer: An optimizer.
191 root_rank: The rank of the process from which the optimizer will be
192 broadcasted to all other processes.
193 """
194 if isinstance(optimizer, torch.optim.LBFGS):
195 # TODO(travis): L-BFGS cannot be easily supported without serializing
196 # the entire state_dict, as its structure is deeply nested and contains
197 # None type parameter values
198 raise ValueError('cannot broadcast torch.optim.LBFGS state')
199
200 state_dict = optimizer.state_dict()
201
202 # Newly created optimizers will not have their state initialized, so
203 # do that initialization here
204 if len(state_dict['state']) == 0:
205 for group in optimizer.param_groups:
206 for p in group['params']:
207 p.grad = p.data.new(p.size()).zero_()
208 # This function accepts a torch.optim.Optimizer or a DistributedOptimizer
209 # wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
210 # forces allreduce on all model parameters, which will result in deadlock
211 # unless every rank calls step(). Therefore, to finish state initialization
212 # only call optimizer.step() with a torch.optim.Optimizer.
213 if optimizer.__module__ == DistributedOptimizer.__module__:
214 super(optimizer.__class__, optimizer).step()
215 else:
216 optimizer.step()
217 state_dict = optimizer.state_dict()
218
219 # If the state_dict is still empty after initialization, then
220 # the optimizer is stateless, and there is nothing to broadcast.
221 # Furthermore, attempting to access the state dict would result in
222 # an error.
223 if len(state_dict['state']) == 0:
224 return
225
226 params = []
227 callbacks = {}
228 occurrences = collections.defaultdict(int)
229
230 # Some optimizer parameters may be represented as scalars instead of
231 # tensors. In such cases, we need to wrap the scalar in a tensor, then
232 # broadcast, then update the appropriate value in the state_dict with the
233 # new unwrapped scalar value via a callback.
234 def _create_callback(pid, name, t, p):
235 def _from_tensor():
236 state_dict['state'][pid][name] = t(p.numpy()[0])
237 return _from_tensor
238
239 def _create_option_callback(index, option_key, option_tensor, dtype):
240 def _from_tensor():
241 optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])
242 return _from_tensor
243
244 # Param groups are an ordered list, normally there is only one per model,
245 # but users can add additional param groups for example to train
246 # previously frozen layers
247 for index, group in enumerate(state_dict['param_groups']):
248 # Broadcast options like learning rate
249 for option_key, option_value in group.items():
250 if option_key == 'params':
251 continue
252
253 # Options like the learning rate are scalar, and need to be wrapped in tensors
254 key = '%s.%d' % (option_key, index)
255 dtype = type(option_value)
256 option_tensor = torch.Tensor([option_value])
257 callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtype)
258 params.append((key, option_tensor))
259
260 # The params list here is ordered by the layers in the model
261 for pid in group['params']:
262 param_state = state_dict['state'][pid]
263 for name, p in param_state.items():
264 # Some parameter names may appear more than once, in which
265 # case we ensure they have a unique identifier defined by
266 # their order
267 occurrences[name] += 1
268 key = '%s.%d' % (str(name), occurrences[name])
269
270 if not torch.is_tensor(p):
271 # Wrap the scalar in a FloatTensor, and remember its type
272 # so we can cast it back after unwrapping
273 t = type(p)
274 p = torch.Tensor([p])
275 callbacks[key] = _create_callback(pid, name, t, p)
276
277 params.append((key, p))
278
279 # Synchronized broadcast of all parameters
280 broadcast_parameters(params, root_rank)
281
282 # Post-broadcast clenaup for non-tensor parameters
283 for key, p in params:
284 if key in callbacks:
285 callbacks[key]()
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/torch/__init__.py b/horovod/torch/__init__.py
--- a/horovod/torch/__init__.py
+++ b/horovod/torch/__init__.py
@@ -227,6 +227,22 @@
callbacks = {}
occurrences = collections.defaultdict(int)
+ # Returns the full type structure of the possibly nested objects for recursive casting back
+ def _get_types(x):
+ if isinstance(x, collections.Iterable):
+ return type(x), [_get_types(xi) for xi in x]
+ else:
+ return type(x)
+
+ # Casts an object encoded in a tensor back into its original type and subtypes
+ def _recursive_cast(x, dtype):
+ if isinstance(dtype, tuple):
+ t, dtypes = dtype
+ x = t(x)
+ return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
+ else:
+ return dtype(x)
+
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
@@ -236,9 +252,9 @@
state_dict['state'][pid][name] = t(p.numpy()[0])
return _from_tensor
- def _create_option_callback(index, option_key, option_tensor, dtype):
+ def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
- optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])
+ optimizer.param_groups[index][option_key] = _recursive_cast(option_tensor.numpy()[0], dtypes)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
@@ -252,9 +268,9 @@
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = '%s.%d' % (option_key, index)
- dtype = type(option_value)
+ dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
- callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtype)
+ callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtypes)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
| {"golden_diff": "diff --git a/horovod/torch/__init__.py b/horovod/torch/__init__.py\n--- a/horovod/torch/__init__.py\n+++ b/horovod/torch/__init__.py\n@@ -227,6 +227,22 @@\n callbacks = {}\n occurrences = collections.defaultdict(int)\n \n+ # Returns the full type structure of the possibly nested objects for recursive casting back\n+ def _get_types(x):\n+ if isinstance(x, collections.Iterable):\n+ return type(x), [_get_types(xi) for xi in x]\n+ else:\n+ return type(x)\n+\n+ # Casts an object encoded in a tensor back into its original type and subtypes\n+ def _recursive_cast(x, dtype):\n+ if isinstance(dtype, tuple):\n+ t, dtypes = dtype\n+ x = t(x)\n+ return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])\n+ else:\n+ return dtype(x)\n+\n # Some optimizer parameters may be represented as scalars instead of\n # tensors. In such cases, we need to wrap the scalar in a tensor, then\n # broadcast, then update the appropriate value in the state_dict with the\n@@ -236,9 +252,9 @@\n state_dict['state'][pid][name] = t(p.numpy()[0])\n return _from_tensor\n \n- def _create_option_callback(index, option_key, option_tensor, dtype):\n+ def _create_option_callback(index, option_key, option_tensor, dtypes):\n def _from_tensor():\n- optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])\n+ optimizer.param_groups[index][option_key] = _recursive_cast(option_tensor.numpy()[0], dtypes)\n return _from_tensor\n \n # Param groups are an ordered list, normally there is only one per model,\n@@ -252,9 +268,9 @@\n \n # Options like the learning rate are scalar, and need to be wrapped in tensors\n key = '%s.%d' % (option_key, index)\n- dtype = type(option_value)\n+ dtypes = _get_types(option_value)\n option_tensor = torch.Tensor([option_value])\n- callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtype)\n+ callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtypes)\n params.append((key, option_tensor))\n \n # The params list here is ordered by the layers in the model\n", "issue": "pytorch + horovod 0.15.1 distributed optimizer not working anymore\nI just upgraded horovod 0.15.0 -> 0.15.1 on a ubuntu image `4.4.0-137-generic #163-Ubuntu SMP Mon Sep 24 13:14:43 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux`. When using the DistributedOptimizer from horovod.torch I now encounter the error\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"train.py\", line 641, in <module>\r\n train_images(hps)\r\n File \"train.py\", line 444, in train_images\r\n train_step(batch, batch_idx, epoch, hps, model, opt, train_logger)\r\n File \"train.py\", line 457, in train_step\r\n opt.step()\r\n File \"/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py\", line 97, in step\r\n return super(self.__class__, self).step(closure)\r\n File \"/opt/conda/lib/python3.6/site-packages/torch/optim/adamax.py\", line 75, in step\r\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\r\nTypeError: mul_() received an invalid combination of arguments - got (numpy.float32), but expected one of:\r\n * (Tensor other)\r\n didn't match because some of the arguments have invalid types: (numpy.float32)\r\n * (float other)\r\n didn't match because some of the arguments have invalid types: (numpy.float32)\r\n```\r\n\r\nDowngrading to 0.15.0 fixes the issue. The behavior is independent of CPU, GPU or MultipleGPU training.\n", "before_files": [{"content": "# Copyright 2018 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom horovod.common import check_extension\n\ntry:\n check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',\n __file__, 'mpi_lib_v2')\nexcept:\n check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',\n __file__, 'mpi_lib', '_mpi_lib')\n\nfrom horovod.torch.compression import Compression\nfrom horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_\nfrom horovod.torch.mpi_ops import allgather, allgather_async\nfrom horovod.torch.mpi_ops import broadcast, broadcast_async, broadcast_, broadcast_async_\nfrom horovod.torch.mpi_ops import poll, synchronize\nfrom horovod.torch.mpi_ops import init, shutdown\nfrom horovod.torch.mpi_ops import size, local_size, rank, local_rank\nfrom horovod.torch.mpi_ops import mpi_threads_supported\n\nimport torch\nimport collections\n\n\nclass _DistributedOptimizer(torch.optim.Optimizer):\n def __init__(self, params, named_parameters, compression):\n super(self.__class__, self).__init__(params)\n self._compression = compression\n\n if named_parameters is not None:\n named_parameters = list(named_parameters)\n else:\n named_parameters = []\n\n # make sure that named_parameters are tuples\n if any([not isinstance(p, tuple) for p in named_parameters]):\n raise ValueError('named_parameters should be a sequence of '\n 'tuples (name, parameter), usually produced by '\n 'model.named_parameters().')\n\n if len(named_parameters) > 0:\n self._parameter_names = {v: k for k, v\n in sorted(named_parameters)}\n else:\n self._parameter_names = {v: 'allreduce.noname.%s' % i\n for param_group in self.param_groups\n for i, v in enumerate(param_group['params'])}\n\n self._handles = {}\n self._grad_accs = []\n self._requires_update = set()\n if size() > 1:\n self._register_hooks()\n\n def _register_hooks(self):\n for param_group in self.param_groups:\n for p in param_group['params']:\n if p.requires_grad:\n p.grad = p.data.new(p.size()).zero_()\n self._requires_update.add(p)\n p_tmp = p.expand_as(p)\n grad_acc = p_tmp.grad_fn.next_functions[0][0]\n grad_acc.register_hook(self._make_hook(p))\n self._grad_accs.append(grad_acc)\n\n def _allreduce_grad(self, p):\n name = self._parameter_names.get(p)\n tensor = p.grad.data\n tensor_compressed, ctx = self._compression.compress(tensor)\n\n handle = allreduce_async_(tensor_compressed, average=True, name=name)\n return handle, ctx\n\n def _make_hook(self, p):\n def hook(*ignore):\n assert p not in self._handles\n assert not p.grad.requires_grad\n handle, ctx = self._allreduce_grad(p)\n self._handles[p] = (handle, ctx)\n return hook\n\n def synchronize(self):\n missing_p = self._requires_update - set(self._handles.keys())\n for p in missing_p:\n self._allreduce_grad(p)\n\n for p, value in self._handles.items():\n handle, ctx = value\n output = 
synchronize(handle)\n p.grad.data.set_(self._compression.decompress(output, ctx))\n self._handles.clear()\n\n def step(self, closure=None):\n self.synchronize()\n return super(self.__class__, self).step(closure)\n\n\ndef DistributedOptimizer(optimizer, named_parameters=None, compression=Compression.none):\n \"\"\"\n An optimizer that wraps another torch.optim.Optimizer, using an allreduce to\n average gradient values before applying gradients to model weights.\n\n Allreduce operations are executed after each gradient is computed by `loss.backward()`\n in parallel with each other. The `step()` method ensures that all allreduce operations are\n finished before applying gradients to the model.\n\n DistributedOptimizer exposes the `synchronize()` method, which forces allreduce operations\n to finish before continuing the execution. It's useful in conjunction with gradient\n clipping, or other operations that modify gradients in place before `step()` is executed.\n\n Example of gradient clipping:\n ```\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.synchronize()\n torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)\n optimizer.step()\n ```\n\n Arguments:\n optimizer: Optimizer to use for computing gradients and applying updates.\n named_parameters: A mapping between parameter names and values. Used for naming of\n allreduce operations. Typically just `model.named_parameters()`.\n compression: Compression algorithm used during allreduce to reduce the amount\n of data sent during the each parameter update step. Defaults to\n not using compression.\n \"\"\"\n # We dynamically create a new class that inherits from the optimizer that was passed in.\n # The goal is to override the `step()` method with an allreduce implementation.\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,),\n dict(_DistributedOptimizer.__dict__))\n return cls(optimizer.param_groups, named_parameters, compression)\n\n\ndef broadcast_parameters(params, root_rank):\n \"\"\"\n Broadcasts the parameters from root rank to all other processes.\n Typical usage is to broadcast the `model.state_dict()`,\n `model.named_parameters()`, or `model.parameters()`.\n\n Arguments:\n params: One of the following:\n - list of parameters to broadcast\n - dict of parameters to broadcast\n root_rank: The rank of the process from which parameters will be\n broadcasted to all other processes.\n \"\"\"\n if isinstance(params, dict):\n params = sorted(params.items())\n elif isinstance(params, list):\n # support both named_parameters() and regular parameters()\n params = [p if isinstance(p, tuple) else (None, p) for p in params]\n else:\n raise ValueError('invalid params of type: %s' % type(params))\n\n # Run asynchronous broadcasts.\n handles = []\n for name, p in params:\n handle = broadcast_async_(p, root_rank, name)\n handles.append(handle)\n\n # Wait for completion.\n for handle in handles:\n synchronize(handle)\n\n\ndef broadcast_optimizer_state(optimizer, root_rank):\n \"\"\"\n Broadcasts an optimizer state from root rank to all other processes.\n\n Arguments:\n optimizer: An optimizer.\n root_rank: The rank of the process from which the optimizer will be\n broadcasted to all other processes.\n \"\"\"\n if isinstance(optimizer, torch.optim.LBFGS):\n # TODO(travis): L-BFGS cannot be easily supported without serializing\n # the entire state_dict, as its structure is deeply nested and contains\n # None type parameter values\n raise ValueError('cannot broadcast torch.optim.LBFGS 
state')\n\n state_dict = optimizer.state_dict()\n\n # Newly created optimizers will not have their state initialized, so\n # do that initialization here\n if len(state_dict['state']) == 0:\n for group in optimizer.param_groups:\n for p in group['params']:\n p.grad = p.data.new(p.size()).zero_()\n # This function accepts a torch.optim.Optimizer or a DistributedOptimizer\n # wrapped around a torch optimizer. Calling step() with a DistributedOptimizer\n # forces allreduce on all model parameters, which will result in deadlock\n # unless every rank calls step(). Therefore, to finish state initialization\n # only call optimizer.step() with a torch.optim.Optimizer.\n if optimizer.__module__ == DistributedOptimizer.__module__:\n super(optimizer.__class__, optimizer).step()\n else:\n optimizer.step()\n state_dict = optimizer.state_dict()\n\n # If the state_dict is still empty after initialization, then\n # the optimizer is stateless, and there is nothing to broadcast.\n # Furthermore, attempting to access the state dict would result in\n # an error.\n if len(state_dict['state']) == 0:\n return\n\n params = []\n callbacks = {}\n occurrences = collections.defaultdict(int)\n\n # Some optimizer parameters may be represented as scalars instead of\n # tensors. In such cases, we need to wrap the scalar in a tensor, then\n # broadcast, then update the appropriate value in the state_dict with the\n # new unwrapped scalar value via a callback.\n def _create_callback(pid, name, t, p):\n def _from_tensor():\n state_dict['state'][pid][name] = t(p.numpy()[0])\n return _from_tensor\n\n def _create_option_callback(index, option_key, option_tensor, dtype):\n def _from_tensor():\n optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])\n return _from_tensor\n\n # Param groups are an ordered list, normally there is only one per model,\n # but users can add additional param groups for example to train\n # previously frozen layers\n for index, group in enumerate(state_dict['param_groups']):\n # Broadcast options like learning rate\n for option_key, option_value in group.items():\n if option_key == 'params':\n continue\n\n # Options like the learning rate are scalar, and need to be wrapped in tensors\n key = '%s.%d' % (option_key, index)\n dtype = type(option_value)\n option_tensor = torch.Tensor([option_value])\n callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtype)\n params.append((key, option_tensor))\n\n # The params list here is ordered by the layers in the model\n for pid in group['params']:\n param_state = state_dict['state'][pid]\n for name, p in param_state.items():\n # Some parameter names may appear more than once, in which\n # case we ensure they have a unique identifier defined by\n # their order\n occurrences[name] += 1\n key = '%s.%d' % (str(name), occurrences[name])\n\n if not torch.is_tensor(p):\n # Wrap the scalar in a FloatTensor, and remember its type\n # so we can cast it back after unwrapping\n t = type(p)\n p = torch.Tensor([p])\n callbacks[key] = _create_callback(pid, name, t, p)\n\n params.append((key, p))\n\n # Synchronized broadcast of all parameters\n broadcast_parameters(params, root_rank)\n\n # Post-broadcast clenaup for non-tensor parameters\n for key, p in params:\n if key in callbacks:\n callbacks[key]()\n", "path": "horovod/torch/__init__.py"}], "after_files": [{"content": "# Copyright 2018 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom horovod.common import check_extension\n\ntry:\n check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',\n __file__, 'mpi_lib_v2')\nexcept:\n check_extension('horovod.torch', 'HOROVOD_WITH_PYTORCH',\n __file__, 'mpi_lib', '_mpi_lib')\n\nfrom horovod.torch.compression import Compression\nfrom horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_\nfrom horovod.torch.mpi_ops import allgather, allgather_async\nfrom horovod.torch.mpi_ops import broadcast, broadcast_async, broadcast_, broadcast_async_\nfrom horovod.torch.mpi_ops import poll, synchronize\nfrom horovod.torch.mpi_ops import init, shutdown\nfrom horovod.torch.mpi_ops import size, local_size, rank, local_rank\nfrom horovod.torch.mpi_ops import mpi_threads_supported\n\nimport torch\nimport collections\n\n\nclass _DistributedOptimizer(torch.optim.Optimizer):\n def __init__(self, params, named_parameters, compression):\n super(self.__class__, self).__init__(params)\n self._compression = compression\n\n if named_parameters is not None:\n named_parameters = list(named_parameters)\n else:\n named_parameters = []\n\n # make sure that named_parameters are tuples\n if any([not isinstance(p, tuple) for p in named_parameters]):\n raise ValueError('named_parameters should be a sequence of '\n 'tuples (name, parameter), usually produced by '\n 'model.named_parameters().')\n\n if len(named_parameters) > 0:\n self._parameter_names = {v: k for k, v\n in sorted(named_parameters)}\n else:\n self._parameter_names = {v: 'allreduce.noname.%s' % i\n for param_group in self.param_groups\n for i, v in enumerate(param_group['params'])}\n\n self._handles = {}\n self._grad_accs = []\n self._requires_update = set()\n if size() > 1:\n self._register_hooks()\n\n def _register_hooks(self):\n for param_group in self.param_groups:\n for p in param_group['params']:\n if p.requires_grad:\n p.grad = p.data.new(p.size()).zero_()\n self._requires_update.add(p)\n p_tmp = p.expand_as(p)\n grad_acc = p_tmp.grad_fn.next_functions[0][0]\n grad_acc.register_hook(self._make_hook(p))\n self._grad_accs.append(grad_acc)\n\n def _allreduce_grad(self, p):\n name = self._parameter_names.get(p)\n tensor = p.grad.data\n tensor_compressed, ctx = self._compression.compress(tensor)\n\n handle = allreduce_async_(tensor_compressed, average=True, name=name)\n return handle, ctx\n\n def _make_hook(self, p):\n def hook(*ignore):\n assert p not in self._handles\n assert not p.grad.requires_grad\n handle, ctx = self._allreduce_grad(p)\n self._handles[p] = (handle, ctx)\n return hook\n\n def synchronize(self):\n missing_p = self._requires_update - set(self._handles.keys())\n for p in missing_p:\n self._allreduce_grad(p)\n\n for p, value in self._handles.items():\n handle, ctx = value\n output = 
synchronize(handle)\n p.grad.data.set_(self._compression.decompress(output, ctx))\n self._handles.clear()\n\n def step(self, closure=None):\n self.synchronize()\n return super(self.__class__, self).step(closure)\n\n\ndef DistributedOptimizer(optimizer, named_parameters=None, compression=Compression.none):\n \"\"\"\n An optimizer that wraps another torch.optim.Optimizer, using an allreduce to\n average gradient values before applying gradients to model weights.\n\n Allreduce operations are executed after each gradient is computed by `loss.backward()`\n in parallel with each other. The `step()` method ensures that all allreduce operations are\n finished before applying gradients to the model.\n\n DistributedOptimizer exposes the `synchronize()` method, which forces allreduce operations\n to finish before continuing the execution. It's useful in conjunction with gradient\n clipping, or other operations that modify gradients in place before `step()` is executed.\n\n Example of gradient clipping:\n ```\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.synchronize()\n torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)\n optimizer.step()\n ```\n\n Arguments:\n optimizer: Optimizer to use for computing gradients and applying updates.\n named_parameters: A mapping between parameter names and values. Used for naming of\n allreduce operations. Typically just `model.named_parameters()`.\n compression: Compression algorithm used during allreduce to reduce the amount\n of data sent during the each parameter update step. Defaults to\n not using compression.\n \"\"\"\n # We dynamically create a new class that inherits from the optimizer that was passed in.\n # The goal is to override the `step()` method with an allreduce implementation.\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,),\n dict(_DistributedOptimizer.__dict__))\n return cls(optimizer.param_groups, named_parameters, compression)\n\n\ndef broadcast_parameters(params, root_rank):\n \"\"\"\n Broadcasts the parameters from root rank to all other processes.\n Typical usage is to broadcast the `model.state_dict()`,\n `model.named_parameters()`, or `model.parameters()`.\n\n Arguments:\n params: One of the following:\n - list of parameters to broadcast\n - dict of parameters to broadcast\n root_rank: The rank of the process from which parameters will be\n broadcasted to all other processes.\n \"\"\"\n if isinstance(params, dict):\n params = sorted(params.items())\n elif isinstance(params, list):\n # support both named_parameters() and regular parameters()\n params = [p if isinstance(p, tuple) else (None, p) for p in params]\n else:\n raise ValueError('invalid params of type: %s' % type(params))\n\n # Run asynchronous broadcasts.\n handles = []\n for name, p in params:\n handle = broadcast_async_(p, root_rank, name)\n handles.append(handle)\n\n # Wait for completion.\n for handle in handles:\n synchronize(handle)\n\n\ndef broadcast_optimizer_state(optimizer, root_rank):\n \"\"\"\n Broadcasts an optimizer state from root rank to all other processes.\n\n Arguments:\n optimizer: An optimizer.\n root_rank: The rank of the process from which the optimizer will be\n broadcasted to all other processes.\n \"\"\"\n if isinstance(optimizer, torch.optim.LBFGS):\n # TODO(travis): L-BFGS cannot be easily supported without serializing\n # the entire state_dict, as its structure is deeply nested and contains\n # None type parameter values\n raise ValueError('cannot broadcast torch.optim.LBFGS 
state')\n\n state_dict = optimizer.state_dict()\n\n # Newly created optimizers will not have their state initialized, so\n # do that initialization here\n if len(state_dict['state']) == 0:\n for group in optimizer.param_groups:\n for p in group['params']:\n p.grad = p.data.new(p.size()).zero_()\n # This function accepts a torch.optim.Optimizer or a DistributedOptimizer\n # wrapped around a torch optimizer. Calling step() with a DistributedOptimizer\n # forces allreduce on all model parameters, which will result in deadlock\n # unless every rank calls step(). Therefore, to finish state initialization\n # only call optimizer.step() with a torch.optim.Optimizer.\n if optimizer.__module__ == DistributedOptimizer.__module__:\n super(optimizer.__class__, optimizer).step()\n else:\n optimizer.step()\n state_dict = optimizer.state_dict()\n\n # If the state_dict is still empty after initialization, then\n # the optimizer is stateless, and there is nothing to broadcast.\n # Furthermore, attempting to access the state dict would result in\n # an error.\n if len(state_dict['state']) == 0:\n return\n\n params = []\n callbacks = {}\n occurrences = collections.defaultdict(int)\n\n # Returns the full type structure of the possibly nested objects for recursive casting back\n def _get_types(x):\n if isinstance(x, collections.Iterable):\n return type(x), [_get_types(xi) for xi in x]\n else:\n return type(x)\n\n # Casts an object encoded in a tensor back into its original type and subtypes\n def _recursive_cast(x, dtype):\n if isinstance(dtype, tuple):\n t, dtypes = dtype\n x = t(x)\n return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])\n else:\n return dtype(x)\n\n # Some optimizer parameters may be represented as scalars instead of\n # tensors. In such cases, we need to wrap the scalar in a tensor, then\n # broadcast, then update the appropriate value in the state_dict with the\n # new unwrapped scalar value via a callback.\n def _create_callback(pid, name, t, p):\n def _from_tensor():\n state_dict['state'][pid][name] = t(p.numpy()[0])\n return _from_tensor\n\n def _create_option_callback(index, option_key, option_tensor, dtypes):\n def _from_tensor():\n optimizer.param_groups[index][option_key] = _recursive_cast(option_tensor.numpy()[0], dtypes)\n return _from_tensor\n\n # Param groups are an ordered list, normally there is only one per model,\n # but users can add additional param groups for example to train\n # previously frozen layers\n for index, group in enumerate(state_dict['param_groups']):\n # Broadcast options like learning rate\n for option_key, option_value in group.items():\n if option_key == 'params':\n continue\n\n # Options like the learning rate are scalar, and need to be wrapped in tensors\n key = '%s.%d' % (option_key, index)\n dtypes = _get_types(option_value)\n option_tensor = torch.Tensor([option_value])\n callbacks[key] = _create_option_callback(index, option_key, option_tensor, dtypes)\n params.append((key, option_tensor))\n\n # The params list here is ordered by the layers in the model\n for pid in group['params']:\n param_state = state_dict['state'][pid]\n for name, p in param_state.items():\n # Some parameter names may appear more than once, in which\n # case we ensure they have a unique identifier defined by\n # their order\n occurrences[name] += 1\n key = '%s.%d' % (str(name), occurrences[name])\n\n if not torch.is_tensor(p):\n # Wrap the scalar in a FloatTensor, and remember its type\n # so we can cast it back after unwrapping\n t = type(p)\n p = 
torch.Tensor([p])\n callbacks[key] = _create_callback(pid, name, t, p)\n\n params.append((key, p))\n\n # Synchronized broadcast of all parameters\n broadcast_parameters(params, root_rank)\n\n # Post-broadcast clenaup for non-tensor parameters\n for key, p in params:\n if key in callbacks:\n callbacks[key]()\n", "path": "horovod/torch/__init__.py"}]} | 3,898 | 567 |
gh_patches_debug_30508 | rasdani/github-patches | git_diff | vas3k__vas3k.club-709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: in a link post, if the link points to a YouTube playlist it resolves to the first video, and the information that it is a playlist is lost
Example: at https://vas3k.club/link/11936/ the link and the embed lead to https://www.youtube.com/watch?v=CC71WyVLnnk instead of the expected https://www.youtube.com/watch?v=CC71WyVLnnk&list=PLXOrZPAO2Ui021R3sKD5z0n9Qzeix9Kbj&index=2
Similarly, when a playlist link is pasted into the body of a post, it resolves to the first video
--- END ISSUE ---
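A minimal sketch (not part of the original report) illustrating why the playlist information is lost with the code shown below: `YOUTUBE_RE` captures the video id in group 1, and `ClubRenderer.youtube()` builds the embed URL from that group alone, so the `list=` parameter never reaches the iframe. The pattern is re-typed here from `common/regexp.py`:

```python
import re

# Re-typed copy of YOUTUBE_RE from common/regexp.py (see the files below)
YOUTUBE_RE = re.compile(
    r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
)

url = ("https://www.youtube.com/watch?v=CC71WyVLnnk"
       "&list=PLXOrZPAO2Ui021R3sKD5z0n9Qzeix9Kbj&index=2")

match = YOUTUBE_RE.match(url)
print(match.group(1))  # 'CC71WyVLnnk'
# ClubRenderer.youtube() interpolates only group(1) into the embed src,
# so the playlist id from the 'list=' query parameter is silently dropped.
```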
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/regexp.py`
Content:
```
1 import re
2
3 USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")
4 IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
5 VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
6 YOUTUBE_RE = re.compile(
7 r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
8 )
9 TWITTER_RE = re.compile(r"(https?:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/[\d]+)")
10 FAVICON_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png|ico)")
11
```
Path: `common/markdown/club_renderer.py`
Content:
```
1 import html
2 import mistune
3 from urllib.parse import unquote
4 from mistune import escape_html
5 from slugify import slugify
6
7 from common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE
8
9 IMAGE_CSS_CLASSES = {
10 "-": "text-body-image-full"
11 }
12
13
14 class ClubRenderer(mistune.HTMLRenderer):
15 def text(self, text):
16 text = escape_html(text)
17 text = USERNAME_RE.sub(r' <a href="/user/\1/">@\1</a>', text)
18 return text
19
20 def paragraph(self, text):
21 text = text.replace("\n", "<br>\n") # Mistune 2.0 broke newlines, let's hack it =/
22 return f"<p>{text}</p>\n"
23
24 def heading(self, text, level):
25 tag = f"h{level}"
26 anchor = slugify(text[:24])
27 return f"<{tag} id=\"{anchor}\"><a href=\"#{anchor}\">{text}</a></{tag}>\n"
28
29 def link(self, link, text=None, title=None):
30 if not text and not title:
31 # it's a pure link (without link tag) and we can try to parse it
32 embed = self.embed(link, text or "", title or "")
33 if embed:
34 return embed
35
36 if text is None:
37 text = link
38
39 # here's some magic of unescape->unquote->escape
40 # to fix cyrillic (and other non-latin) wikipedia URLs
41 return f'<a href="{self._safe_url(link)}">{html.escape(unquote(html.unescape(text or link)))}</a>'
42
43 def image(self, src, alt="", title=None):
44 embed = self.embed(src, alt, title)
45 if embed:
46 return embed
47
48 # users can try to "hack" our parser by using non-image urls
49 # so, if its not an image or video, display it as a link to avoid auto-loading
50 return f'<a href="{escape_html(src)}">{escape_html(src)}</a>'
51
52 def embed(self, src, alt="", title=None):
53 if IMAGE_RE.match(src):
54 return self.simple_image(src, alt, title)
55
56 if YOUTUBE_RE.match(src):
57 return self.youtube(src, alt, title)
58
59 if VIDEO_RE.match(src):
60 return self.video(src, alt, title)
61
62 if TWITTER_RE.match(src):
63 return self.tweet(src, alt, title)
64
65 return None
66
67 def simple_image(self, src, alt="", title=None):
68 css_classes = ""
69 title = title or alt
70 if title in IMAGE_CSS_CLASSES:
71 css_classes = IMAGE_CSS_CLASSES[title]
72
73 image_tag = f'<img loading="lazy" src="{escape_html(src)}" alt="{escape_html(title)}">'
74 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
75 return f'<figure class="{css_classes}">{image_tag}{caption}</figure>'
76
77 def youtube(self, src, alt="", title=None):
78 youtube_match = YOUTUBE_RE.match(src)
79 video_tag = (
80 f'<span class="ratio-16-9">'
81 f'<iframe loading="lazy" src="https://www.youtube.com/embed/{escape_html(youtube_match.group(1))}'
82 f'?autoplay=0&controls=1&showinfo=1&vq=hd1080"'
83 f'allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen"'
84 f'allowfullscreen></iframe>'
85 f"</span>"
86 )
87 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
88 return f"<figure>{video_tag}{caption}</figure>"
89
90 def video(self, src, alt="", title=None):
91 video_tag = (
92 f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'
93 )
94 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
95 return f"<figure>{video_tag}{caption}</figure>"
96
97 def tweet(self, src, alt="", title=None):
98 tweet_match = TWITTER_RE.match(src)
99 twitter_tag = f'<blockquote class="twitter-tweet" tw-align-center>' \
100 f'<a href="{tweet_match.group(1)}"></a></blockquote><br>' \
101 f'<a href="{src}" target="_blank">{src}</a>'
102 return twitter_tag
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py
--- a/common/markdown/club_renderer.py
+++ b/common/markdown/club_renderer.py
@@ -76,10 +76,13 @@
def youtube(self, src, alt="", title=None):
youtube_match = YOUTUBE_RE.match(src)
+ playlist = ""
+ if youtube_match.group(2):
+ playlist = f"list={escape_html(youtube_match.group(2))}&listType=playlist&"
video_tag = (
f'<span class="ratio-16-9">'
f'<iframe loading="lazy" src="https://www.youtube.com/embed/{escape_html(youtube_match.group(1))}'
- f'?autoplay=0&controls=1&showinfo=1&vq=hd1080"'
+ f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080"'
f'allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen"'
f'allowfullscreen></iframe>'
f"</span>"
diff --git a/common/regexp.py b/common/regexp.py
--- a/common/regexp.py
+++ b/common/regexp.py
@@ -4,7 +4,7 @@
IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
YOUTUBE_RE = re.compile(
- r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
+ r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(?:.*list=(PL[\w\-\_]*))?"
)
TWITTER_RE = re.compile(r"(https?:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/[\d]+)")
FAVICON_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png|ico)")
| {"golden_diff": "diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py\n--- a/common/markdown/club_renderer.py\n+++ b/common/markdown/club_renderer.py\n@@ -76,10 +76,13 @@\n \n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n+ playlist = \"\"\n+ if youtube_match.group(2):\n+ playlist = f\"list={escape_html(youtube_match.group(2))}&listType=playlist&\"\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1))}'\n- f'?autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n+ f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\ndiff --git a/common/regexp.py b/common/regexp.py\n--- a/common/regexp.py\n+++ b/common/regexp.py\n@@ -4,7 +4,7 @@\n IMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\n VIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\n YOUTUBE_RE = re.compile(\n- r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n+ r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(?:.*list=(PL[\\w\\-\\_]*))?\"\n )\n TWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\n FAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "issue": "\u0411\u0430\u0433: \u0412 \u043f\u043e\u0441\u0442\u0435 \u0441\u0441\u044b\u043b\u043a\u0435, \u0435\u0441\u043b\u0438 \u0441\u0441\u044b\u043b\u043a\u0430 \u043d\u0430 \u044e\u0442\u0443\u0431 \u043f\u043b\u0435\u0439\u043b\u0438\u0441\u0442 \u0442\u043e \u043e\u043d \u0440\u0435\u0441\u043e\u043b\u0432\u0438\u0442\u044c\u0441\u044f \u0432 \u043f\u0435\u0440\u0432\u043e\u0435 \u0432\u0438\u0434\u0435\u043e, \u0438 \u0442\u0435\u0440\u044f\u0435\u0442\u044c\u0441\u044f \u0438\u043d\u0444\u043e\u0440\u043c\u0430\u0446\u0438\u044f \u043e \u0442\u043e\u043c \u0447\u0442\u043e \u044d\u0442\u043e \u043f\u043b\u0435\u0439\u043b\u0438\u0441\u0442 \n\u041f\u0440\u0438\u043c\u0435\u0440 https://vas3k.club/link/11936/ \u043b\u0438\u043d\u043a \u0438 \u044d\u043c\u0431\u0435\u0434\u0434 \u0432\u0435\u0434\u0435\u0442 \u043d\u0430 https://www.youtube.com/watch?v=CC71WyVLnnk \u0432\u043c\u0435\u0441\u0442\u043e \u043e\u0436\u0438\u0434\u0430\u0435\u043c\u043e\u0433\u043e https://www.youtube.com/watch?v=CC71WyVLnnk&list=PLXOrZPAO2Ui021R3sKD5z0n9Qzeix9Kbj&index=2 \r\n\r\n\u0410\u043d\u0430\u043b\u043e\u0433\u0438\u0447\u043d\u043e \u043f\u0440\u0438 \u0432\u0441\u0442\u0430\u0432\u043a\u0435 \u0441\u0441\u044b\u043b\u043a\u0438 \u043d\u0430 \u043f\u043b\u0435\u0439\u043b\u0438\u0441\u0442 \u0432 \u0442\u0435\u043b\u043e \u043f\u043e\u0441\u0442\u0430, \u043e\u043d\u0430 \u0440\u0435\u0441\u043e\u043b\u0432\u0438\u0442\u0441\u044f \u0432 \u043f\u0435\u0440\u0432\u043e\u0435 \u0432\u0438\u0434\u0435\u043e\n", "before_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n 
r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}, {"content": "import html\nimport mistune\nfrom urllib.parse import unquote\nfrom mistune import escape_html\nfrom slugify import slugify\n\nfrom common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE\n\nIMAGE_CSS_CLASSES = {\n \"-\": \"text-body-image-full\"\n}\n\n\nclass ClubRenderer(mistune.HTMLRenderer):\n def text(self, text):\n text = escape_html(text)\n text = USERNAME_RE.sub(r' <a href=\"/user/\\1/\">@\\1</a>', text)\n return text\n\n def paragraph(self, text):\n text = text.replace(\"\\n\", \"<br>\\n\") # Mistune 2.0 broke newlines, let's hack it =/\n return f\"<p>{text}</p>\\n\"\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n anchor = slugify(text[:24])\n return f\"<{tag} id=\\\"{anchor}\\\"><a href=\\\"#{anchor}\\\">{text}</a></{tag}>\\n\"\n\n def link(self, link, text=None, title=None):\n if not text and not title:\n # it's a pure link (without link tag) and we can try to parse it\n embed = self.embed(link, text or \"\", title or \"\")\n if embed:\n return embed\n\n if text is None:\n text = link\n\n # here's some magic of unescape->unquote->escape\n # to fix cyrillic (and other non-latin) wikipedia URLs\n return f'<a href=\"{self._safe_url(link)}\">{html.escape(unquote(html.unescape(text or link)))}</a>'\n\n def image(self, src, alt=\"\", title=None):\n embed = self.embed(src, alt, title)\n if embed:\n return embed\n\n # users can try to \"hack\" our parser by using non-image urls\n # so, if its not an image or video, display it as a link to avoid auto-loading\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a>'\n\n def embed(self, src, alt=\"\", title=None):\n if IMAGE_RE.match(src):\n return self.simple_image(src, alt, title)\n\n if YOUTUBE_RE.match(src):\n return self.youtube(src, alt, title)\n\n if VIDEO_RE.match(src):\n return self.video(src, alt, title)\n\n if TWITTER_RE.match(src):\n return self.tweet(src, alt, title)\n\n return None\n\n def simple_image(self, src, alt=\"\", title=None):\n css_classes = \"\"\n title = title or alt\n if title in IMAGE_CSS_CLASSES:\n css_classes = IMAGE_CSS_CLASSES[title]\n\n image_tag = f'<img loading=\"lazy\" src=\"{escape_html(src)}\" alt=\"{escape_html(title)}\">'\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f'<figure class=\"{css_classes}\">{image_tag}{caption}</figure>'\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1))}'\n f'?autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def video(self, src, alt=\"\", title=None):\n video_tag = (\n f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else 
\"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def tweet(self, src, alt=\"\", title=None):\n tweet_match = TWITTER_RE.match(src)\n twitter_tag = f'<blockquote class=\"twitter-tweet\" tw-align-center>' \\\n f'<a href=\"{tweet_match.group(1)}\"></a></blockquote><br>' \\\n f'<a href=\"{src}\" target=\"_blank\">{src}</a>'\n return twitter_tag\n", "path": "common/markdown/club_renderer.py"}], "after_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(?:.*list=(PL[\\w\\-\\_]*))?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}, {"content": "import html\nimport mistune\nfrom urllib.parse import unquote\nfrom mistune import escape_html\nfrom slugify import slugify\n\nfrom common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE\n\nIMAGE_CSS_CLASSES = {\n \"-\": \"text-body-image-full\"\n}\n\n\nclass ClubRenderer(mistune.HTMLRenderer):\n def text(self, text):\n text = escape_html(text)\n text = USERNAME_RE.sub(r' <a href=\"/user/\\1/\">@\\1</a>', text)\n return text\n\n def paragraph(self, text):\n text = text.replace(\"\\n\", \"<br>\\n\") # Mistune 2.0 broke newlines, let's hack it =/\n return f\"<p>{text}</p>\\n\"\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n anchor = slugify(text[:24])\n return f\"<{tag} id=\\\"{anchor}\\\"><a href=\\\"#{anchor}\\\">{text}</a></{tag}>\\n\"\n\n def link(self, link, text=None, title=None):\n if not text and not title:\n # it's a pure link (without link tag) and we can try to parse it\n embed = self.embed(link, text or \"\", title or \"\")\n if embed:\n return embed\n\n if text is None:\n text = link\n\n # here's some magic of unescape->unquote->escape\n # to fix cyrillic (and other non-latin) wikipedia URLs\n return f'<a href=\"{self._safe_url(link)}\">{html.escape(unquote(html.unescape(text or link)))}</a>'\n\n def image(self, src, alt=\"\", title=None):\n embed = self.embed(src, alt, title)\n if embed:\n return embed\n\n # users can try to \"hack\" our parser by using non-image urls\n # so, if its not an image or video, display it as a link to avoid auto-loading\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a>'\n\n def embed(self, src, alt=\"\", title=None):\n if IMAGE_RE.match(src):\n return self.simple_image(src, alt, title)\n\n if YOUTUBE_RE.match(src):\n return self.youtube(src, alt, title)\n\n if VIDEO_RE.match(src):\n return self.video(src, alt, title)\n\n if TWITTER_RE.match(src):\n return self.tweet(src, alt, title)\n\n return None\n\n def simple_image(self, src, alt=\"\", title=None):\n css_classes = \"\"\n title = title or alt\n if title in IMAGE_CSS_CLASSES:\n css_classes = IMAGE_CSS_CLASSES[title]\n\n image_tag = f'<img loading=\"lazy\" src=\"{escape_html(src)}\" alt=\"{escape_html(title)}\">'\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f'<figure class=\"{css_classes}\">{image_tag}{caption}</figure>'\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n playlist = \"\"\n if youtube_match.group(2):\n playlist = 
f\"list={escape_html(youtube_match.group(2))}&listType=playlist&\"\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1))}'\n f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def video(self, src, alt=\"\", title=None):\n video_tag = (\n f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def tweet(self, src, alt=\"\", title=None):\n tweet_match = TWITTER_RE.match(src)\n twitter_tag = f'<blockquote class=\"twitter-tweet\" tw-align-center>' \\\n f'<a href=\"{tweet_match.group(1)}\"></a></blockquote><br>' \\\n f'<a href=\"{src}\" target=\"_blank\">{src}</a>'\n return twitter_tag\n", "path": "common/markdown/club_renderer.py"}]} | 1,857 | 528 |
gh_patches_debug_1295 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-2065 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModbusException 0x07 is missing in pdu.py
In pdu.py, the ModbusException NegativeAcknowledge is missing. Is it possible to add NegativeAcknowledge = 0x07?
class ModbusExceptions:
IllegalFunction = 0x01
IllegalAddress = 0x02
IllegalValue = 0x03
SlaveFailure = 0x04
Acknowledge = 0x05
SlaveBusy = 0x06
MemoryParityError = 0x08
GatewayPathUnavailable = 0x0A
GatewayNoResponse = 0x0B
--- END ISSUE ---
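A minimal sketch (using only a trimmed copy of the enumeration shown in `pymodbus/pdu.py` below) of how the missing value surfaces at runtime: `decode()` builds its reverse lookup from the class attributes, so exception code 0x07 currently maps to `None` and the attribute itself does not exist:

```python
class ModbusExceptions:
    """Trimmed copy of the enumeration from pymodbus/pdu.py."""

    IllegalFunction = 0x01
    IllegalAddress = 0x02
    IllegalValue = 0x03
    SlaveFailure = 0x04
    Acknowledge = 0x05
    SlaveBusy = 0x06
    MemoryParityError = 0x08

    @classmethod
    def decode(cls, code):
        values = {
            v: k
            for k, v in cls.__dict__.items()
            if not k.startswith("__") and not callable(v)
        }
        return values.get(code, None)


print(ModbusExceptions.decode(0x07))                     # None
print(hasattr(ModbusExceptions, "NegativeAcknowledge"))  # False
```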
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/pdu.py`
Content:
```
1 """Contains base classes for modbus request/response/error packets."""
2
3 __all__ = [
4 "ModbusRequest",
5 "ModbusResponse",
6 "ModbusExceptions",
7 "ExceptionResponse",
8 "IllegalFunctionRequest",
9 ]
10
11 # pylint: disable=missing-type-doc
12 import struct
13
14 from pymodbus.exceptions import NotImplementedException
15 from pymodbus.logging import Log
16 from pymodbus.utilities import rtuFrameSize
17
18
19 # --------------------------------------------------------------------------- #
20 # Base PDUs
21 # --------------------------------------------------------------------------- #
22 class ModbusPDU:
23 """Base class for all Modbus messages.
24
25 .. attribute:: transaction_id
26
27 This value is used to uniquely identify a request
28 response pair. It can be implemented as a simple counter
29
30 .. attribute:: protocol_id
31
32 This is a constant set at 0 to indicate Modbus. It is
33 put here for ease of expansion.
34
35 .. attribute:: slave_id
36
37 This is used to route the request to the correct child. In
38 the TCP modbus, it is used for routing (or not used at all. However,
39 for the serial versions, it is used to specify which child to perform
40 the requests against. The value 0x00 represents the broadcast address
41 (also 0xff).
42
43 .. attribute:: check
44
45 This is used for LRC/CRC in the serial modbus protocols
46
47 .. attribute:: skip_encode
48
49 This is used when the message payload has already been encoded.
50 Generally this will occur when the PayloadBuilder is being used
51 to create a complicated message. By setting this to True, the
52 request will pass the currently encoded message through instead
53 of encoding it again.
54 """
55
56 def __init__(self, slave=0, **kwargs):
57 """Initialize the base data for a modbus request.
58
59 :param slave: Modbus slave slave ID
60
61 """
62 self.transaction_id = kwargs.get("transaction", 0)
63 self.protocol_id = kwargs.get("protocol", 0)
64 self.slave_id = slave
65 self.skip_encode = kwargs.get("skip_encode", False)
66 self.check = 0x0000
67
68 def encode(self):
69 """Encode the message.
70
71 :raises: A not implemented exception
72 """
73 raise NotImplementedException()
74
75 def decode(self, data):
76 """Decode data part of the message.
77
78 :param data: is a string object
79 :raises NotImplementedException:
80 """
81 raise NotImplementedException()
82
83 @classmethod
84 def calculateRtuFrameSize(cls, buffer):
85 """Calculate the size of a PDU.
86
87 :param buffer: A buffer containing the data that have been received.
88 :returns: The number of bytes in the PDU.
89 :raises NotImplementedException:
90 """
91 if hasattr(cls, "_rtu_frame_size"):
92 return cls._rtu_frame_size
93 if hasattr(cls, "_rtu_byte_count_pos"):
94 return rtuFrameSize(buffer, cls._rtu_byte_count_pos)
95 raise NotImplementedException(
96 f"Cannot determine RTU frame size for {cls.__name__}"
97 )
98
99
100 class ModbusRequest(ModbusPDU):
101 """Base class for a modbus request PDU."""
102
103 function_code = -1
104
105 def __init__(self, slave=0, **kwargs): # pylint: disable=useless-parent-delegation
106 """Proxy to the lower level initializer.
107
108 :param slave: Modbus slave slave ID
109 """
110 super().__init__(slave, **kwargs)
111
112 def doException(self, exception):
113 """Build an error response based on the function.
114
115 :param exception: The exception to return
116 :raises: An exception response
117 """
118 exc = ExceptionResponse(self.function_code, exception)
119 Log.error("Exception response {}", exc)
120 return exc
121
122
123 class ModbusResponse(ModbusPDU):
124 """Base class for a modbus response PDU.
125
126 .. attribute:: should_respond
127
128 A flag that indicates if this response returns a result back
129 to the client issuing the request
130
131 .. attribute:: _rtu_frame_size
132
133 Indicates the size of the modbus rtu response used for
134 calculating how much to read.
135 """
136
137 should_respond = True
138 function_code = 0x00
139
140 def __init__(self, slave=0, **kwargs):
141 """Proxy the lower level initializer.
142
143 :param slave: Modbus slave slave ID
144
145 """
146 super().__init__(slave, **kwargs)
147 self.bits = []
148 self.registers = []
149
150 def isError(self) -> bool:
151 """Check if the error is a success or failure."""
152 return self.function_code > 0x80
153
154
155 # --------------------------------------------------------------------------- #
156 # Exception PDUs
157 # --------------------------------------------------------------------------- #
158 class ModbusExceptions: # pylint: disable=too-few-public-methods
159 """An enumeration of the valid modbus exceptions."""
160
161 IllegalFunction = 0x01
162 IllegalAddress = 0x02
163 IllegalValue = 0x03
164 SlaveFailure = 0x04
165 Acknowledge = 0x05
166 SlaveBusy = 0x06
167 MemoryParityError = 0x08
168 GatewayPathUnavailable = 0x0A
169 GatewayNoResponse = 0x0B
170
171 @classmethod
172 def decode(cls, code):
173 """Give an error code, translate it to a string error name.
174
175 :param code: The code number to translate
176 """
177 values = {
178 v: k
179 for k, v in iter(cls.__dict__.items())
180 if not k.startswith("__") and not callable(v)
181 }
182 return values.get(code, None)
183
184
185 class ExceptionResponse(ModbusResponse):
186 """Base class for a modbus exception PDU."""
187
188 ExceptionOffset = 0x80
189 _rtu_frame_size = 5
190
191 def __init__(self, function_code, exception_code=None, **kwargs):
192 """Initialize the modbus exception response.
193
194 :param function_code: The function to build an exception response for
195 :param exception_code: The specific modbus exception to return
196 """
197 super().__init__(**kwargs)
198 self.original_code = function_code
199 self.function_code = function_code | self.ExceptionOffset
200 self.exception_code = exception_code
201
202 def encode(self):
203 """Encode a modbus exception response.
204
205 :returns: The encoded exception packet
206 """
207 return struct.pack(">B", self.exception_code)
208
209 def decode(self, data):
210 """Decode a modbus exception response.
211
212 :param data: The packet data to decode
213 """
214 self.exception_code = int(data[0])
215
216 def __str__(self):
217 """Build a representation of an exception response.
218
219 :returns: The string representation of an exception response
220 """
221 message = ModbusExceptions.decode(self.exception_code)
222 parameters = (self.function_code, self.original_code, message)
223 return (
224 "Exception Response(%d, %d, %s)" # pylint: disable=consider-using-f-string
225 % parameters
226 )
227
228
229 class IllegalFunctionRequest(ModbusRequest):
230 """Define the Modbus slave exception type "Illegal Function".
231
232 This exception code is returned if the slave::
233
234 - does not implement the function code **or**
235 - is not in a state that allows it to process the function
236 """
237
238 ErrorCode = 1
239
240 def __init__(self, function_code, **kwargs):
241 """Initialize a IllegalFunctionRequest.
242
243 :param function_code: The function we are erroring on
244 """
245 super().__init__(**kwargs)
246 self.function_code = function_code
247
248 def decode(self, _data):
249 """Decode so this failure will run correctly."""
250
251 def execute(self, _context):
252 """Build an illegal function request error response.
253
254 :returns: The error response packet
255 """
256 return ExceptionResponse(self.function_code, self.ErrorCode)
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymodbus/pdu.py b/pymodbus/pdu.py
--- a/pymodbus/pdu.py
+++ b/pymodbus/pdu.py
@@ -164,6 +164,7 @@
SlaveFailure = 0x04
Acknowledge = 0x05
SlaveBusy = 0x06
+ NegativeAcknowledge = 0x07
MemoryParityError = 0x08
GatewayPathUnavailable = 0x0A
GatewayNoResponse = 0x0B
| {"golden_diff": "diff --git a/pymodbus/pdu.py b/pymodbus/pdu.py\n--- a/pymodbus/pdu.py\n+++ b/pymodbus/pdu.py\n@@ -164,6 +164,7 @@\n SlaveFailure = 0x04\n Acknowledge = 0x05\n SlaveBusy = 0x06\n+ NegativeAcknowledge = 0x07\n MemoryParityError = 0x08\n GatewayPathUnavailable = 0x0A\n GatewayNoResponse = 0x0B\n", "issue": "ModbusException 0x07 is missing in pdu.py\nIn pdu.py is ModbusException NegativeAcknowledge missing. Is it possible to add: NegativeAcknowledge = 0x07 ?\r\n\r\n\r\nclass ModbusExceptions: \r\n\r\n IllegalFunction = 0x01\r\n IllegalAddress = 0x02\r\n IllegalValue = 0x03\r\n SlaveFailure = 0x04\r\n Acknowledge = 0x05\r\n SlaveBusy = 0x06\r\n MemoryParityError = 0x08\r\n GatewayPathUnavailable = 0x0A\r\n GatewayNoResponse = 0x0B\n", "before_files": [{"content": "\"\"\"Contains base classes for modbus request/response/error packets.\"\"\"\n\n__all__ = [\n \"ModbusRequest\",\n \"ModbusResponse\",\n \"ModbusExceptions\",\n \"ExceptionResponse\",\n \"IllegalFunctionRequest\",\n]\n\n# pylint: disable=missing-type-doc\nimport struct\n\nfrom pymodbus.exceptions import NotImplementedException\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import rtuFrameSize\n\n\n# --------------------------------------------------------------------------- #\n# Base PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusPDU:\n \"\"\"Base class for all Modbus messages.\n\n .. attribute:: transaction_id\n\n This value is used to uniquely identify a request\n response pair. It can be implemented as a simple counter\n\n .. attribute:: protocol_id\n\n This is a constant set at 0 to indicate Modbus. It is\n put here for ease of expansion.\n\n .. attribute:: slave_id\n\n This is used to route the request to the correct child. In\n the TCP modbus, it is used for routing (or not used at all. However,\n for the serial versions, it is used to specify which child to perform\n the requests against. The value 0x00 represents the broadcast address\n (also 0xff).\n\n .. attribute:: check\n\n This is used for LRC/CRC in the serial modbus protocols\n\n .. attribute:: skip_encode\n\n This is used when the message payload has already been encoded.\n Generally this will occur when the PayloadBuilder is being used\n to create a complicated message. 
By setting this to True, the\n request will pass the currently encoded message through instead\n of encoding it again.\n \"\"\"\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Initialize the base data for a modbus request.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n self.transaction_id = kwargs.get(\"transaction\", 0)\n self.protocol_id = kwargs.get(\"protocol\", 0)\n self.slave_id = slave\n self.skip_encode = kwargs.get(\"skip_encode\", False)\n self.check = 0x0000\n\n def encode(self):\n \"\"\"Encode the message.\n\n :raises: A not implemented exception\n \"\"\"\n raise NotImplementedException()\n\n def decode(self, data):\n \"\"\"Decode data part of the message.\n\n :param data: is a string object\n :raises NotImplementedException:\n \"\"\"\n raise NotImplementedException()\n\n @classmethod\n def calculateRtuFrameSize(cls, buffer):\n \"\"\"Calculate the size of a PDU.\n\n :param buffer: A buffer containing the data that have been received.\n :returns: The number of bytes in the PDU.\n :raises NotImplementedException:\n \"\"\"\n if hasattr(cls, \"_rtu_frame_size\"):\n return cls._rtu_frame_size\n if hasattr(cls, \"_rtu_byte_count_pos\"):\n return rtuFrameSize(buffer, cls._rtu_byte_count_pos)\n raise NotImplementedException(\n f\"Cannot determine RTU frame size for {cls.__name__}\"\n )\n\n\nclass ModbusRequest(ModbusPDU):\n \"\"\"Base class for a modbus request PDU.\"\"\"\n\n function_code = -1\n\n def __init__(self, slave=0, **kwargs): # pylint: disable=useless-parent-delegation\n \"\"\"Proxy to the lower level initializer.\n\n :param slave: Modbus slave slave ID\n \"\"\"\n super().__init__(slave, **kwargs)\n\n def doException(self, exception):\n \"\"\"Build an error response based on the function.\n\n :param exception: The exception to return\n :raises: An exception response\n \"\"\"\n exc = ExceptionResponse(self.function_code, exception)\n Log.error(\"Exception response {}\", exc)\n return exc\n\n\nclass ModbusResponse(ModbusPDU):\n \"\"\"Base class for a modbus response PDU.\n\n .. attribute:: should_respond\n\n A flag that indicates if this response returns a result back\n to the client issuing the request\n\n .. 
attribute:: _rtu_frame_size\n\n Indicates the size of the modbus rtu response used for\n calculating how much to read.\n \"\"\"\n\n should_respond = True\n function_code = 0x00\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Proxy the lower level initializer.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n super().__init__(slave, **kwargs)\n self.bits = []\n self.registers = []\n\n def isError(self) -> bool:\n \"\"\"Check if the error is a success or failure.\"\"\"\n return self.function_code > 0x80\n\n\n# --------------------------------------------------------------------------- #\n# Exception PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusExceptions: # pylint: disable=too-few-public-methods\n \"\"\"An enumeration of the valid modbus exceptions.\"\"\"\n\n IllegalFunction = 0x01\n IllegalAddress = 0x02\n IllegalValue = 0x03\n SlaveFailure = 0x04\n Acknowledge = 0x05\n SlaveBusy = 0x06\n MemoryParityError = 0x08\n GatewayPathUnavailable = 0x0A\n GatewayNoResponse = 0x0B\n\n @classmethod\n def decode(cls, code):\n \"\"\"Give an error code, translate it to a string error name.\n\n :param code: The code number to translate\n \"\"\"\n values = {\n v: k\n for k, v in iter(cls.__dict__.items())\n if not k.startswith(\"__\") and not callable(v)\n }\n return values.get(code, None)\n\n\nclass ExceptionResponse(ModbusResponse):\n \"\"\"Base class for a modbus exception PDU.\"\"\"\n\n ExceptionOffset = 0x80\n _rtu_frame_size = 5\n\n def __init__(self, function_code, exception_code=None, **kwargs):\n \"\"\"Initialize the modbus exception response.\n\n :param function_code: The function to build an exception response for\n :param exception_code: The specific modbus exception to return\n \"\"\"\n super().__init__(**kwargs)\n self.original_code = function_code\n self.function_code = function_code | self.ExceptionOffset\n self.exception_code = exception_code\n\n def encode(self):\n \"\"\"Encode a modbus exception response.\n\n :returns: The encoded exception packet\n \"\"\"\n return struct.pack(\">B\", self.exception_code)\n\n def decode(self, data):\n \"\"\"Decode a modbus exception response.\n\n :param data: The packet data to decode\n \"\"\"\n self.exception_code = int(data[0])\n\n def __str__(self):\n \"\"\"Build a representation of an exception response.\n\n :returns: The string representation of an exception response\n \"\"\"\n message = ModbusExceptions.decode(self.exception_code)\n parameters = (self.function_code, self.original_code, message)\n return (\n \"Exception Response(%d, %d, %s)\" # pylint: disable=consider-using-f-string\n % parameters\n )\n\n\nclass IllegalFunctionRequest(ModbusRequest):\n \"\"\"Define the Modbus slave exception type \"Illegal Function\".\n\n This exception code is returned if the slave::\n\n - does not implement the function code **or**\n - is not in a state that allows it to process the function\n \"\"\"\n\n ErrorCode = 1\n\n def __init__(self, function_code, **kwargs):\n \"\"\"Initialize a IllegalFunctionRequest.\n\n :param function_code: The function we are erroring on\n \"\"\"\n super().__init__(**kwargs)\n self.function_code = function_code\n\n def decode(self, _data):\n \"\"\"Decode so this failure will run correctly.\"\"\"\n\n def execute(self, _context):\n \"\"\"Build an illegal function request error response.\n\n :returns: The error response packet\n \"\"\"\n return ExceptionResponse(self.function_code, self.ErrorCode)\n", "path": "pymodbus/pdu.py"}], "after_files": [{"content": "\"\"\"Contains base 
classes for modbus request/response/error packets.\"\"\"\n\n__all__ = [\n \"ModbusRequest\",\n \"ModbusResponse\",\n \"ModbusExceptions\",\n \"ExceptionResponse\",\n \"IllegalFunctionRequest\",\n]\n\n# pylint: disable=missing-type-doc\nimport struct\n\nfrom pymodbus.exceptions import NotImplementedException\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import rtuFrameSize\n\n\n# --------------------------------------------------------------------------- #\n# Base PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusPDU:\n \"\"\"Base class for all Modbus messages.\n\n .. attribute:: transaction_id\n\n This value is used to uniquely identify a request\n response pair. It can be implemented as a simple counter\n\n .. attribute:: protocol_id\n\n This is a constant set at 0 to indicate Modbus. It is\n put here for ease of expansion.\n\n .. attribute:: slave_id\n\n This is used to route the request to the correct child. In\n the TCP modbus, it is used for routing (or not used at all. However,\n for the serial versions, it is used to specify which child to perform\n the requests against. The value 0x00 represents the broadcast address\n (also 0xff).\n\n .. attribute:: check\n\n This is used for LRC/CRC in the serial modbus protocols\n\n .. attribute:: skip_encode\n\n This is used when the message payload has already been encoded.\n Generally this will occur when the PayloadBuilder is being used\n to create a complicated message. By setting this to True, the\n request will pass the currently encoded message through instead\n of encoding it again.\n \"\"\"\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Initialize the base data for a modbus request.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n self.transaction_id = kwargs.get(\"transaction\", 0)\n self.protocol_id = kwargs.get(\"protocol\", 0)\n self.slave_id = slave\n self.skip_encode = kwargs.get(\"skip_encode\", False)\n self.check = 0x0000\n\n def encode(self):\n \"\"\"Encode the message.\n\n :raises: A not implemented exception\n \"\"\"\n raise NotImplementedException()\n\n def decode(self, data):\n \"\"\"Decode data part of the message.\n\n :param data: is a string object\n :raises NotImplementedException:\n \"\"\"\n raise NotImplementedException()\n\n @classmethod\n def calculateRtuFrameSize(cls, buffer):\n \"\"\"Calculate the size of a PDU.\n\n :param buffer: A buffer containing the data that have been received.\n :returns: The number of bytes in the PDU.\n :raises NotImplementedException:\n \"\"\"\n if hasattr(cls, \"_rtu_frame_size\"):\n return cls._rtu_frame_size\n if hasattr(cls, \"_rtu_byte_count_pos\"):\n return rtuFrameSize(buffer, cls._rtu_byte_count_pos)\n raise NotImplementedException(\n f\"Cannot determine RTU frame size for {cls.__name__}\"\n )\n\n\nclass ModbusRequest(ModbusPDU):\n \"\"\"Base class for a modbus request PDU.\"\"\"\n\n function_code = -1\n\n def __init__(self, slave=0, **kwargs): # pylint: disable=useless-parent-delegation\n \"\"\"Proxy to the lower level initializer.\n\n :param slave: Modbus slave slave ID\n \"\"\"\n super().__init__(slave, **kwargs)\n\n def doException(self, exception):\n \"\"\"Build an error response based on the function.\n\n :param exception: The exception to return\n :raises: An exception response\n \"\"\"\n exc = ExceptionResponse(self.function_code, exception)\n Log.error(\"Exception response {}\", exc)\n return exc\n\n\nclass ModbusResponse(ModbusPDU):\n \"\"\"Base class for a modbus response PDU.\n\n .. 
attribute:: should_respond\n\n A flag that indicates if this response returns a result back\n to the client issuing the request\n\n .. attribute:: _rtu_frame_size\n\n Indicates the size of the modbus rtu response used for\n calculating how much to read.\n \"\"\"\n\n should_respond = True\n function_code = 0x00\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Proxy the lower level initializer.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n super().__init__(slave, **kwargs)\n self.bits = []\n self.registers = []\n\n def isError(self) -> bool:\n \"\"\"Check if the error is a success or failure.\"\"\"\n return self.function_code > 0x80\n\n\n# --------------------------------------------------------------------------- #\n# Exception PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusExceptions: # pylint: disable=too-few-public-methods\n \"\"\"An enumeration of the valid modbus exceptions.\"\"\"\n\n IllegalFunction = 0x01\n IllegalAddress = 0x02\n IllegalValue = 0x03\n SlaveFailure = 0x04\n Acknowledge = 0x05\n SlaveBusy = 0x06\n NegativeAcknowledge = 0x07\n MemoryParityError = 0x08\n GatewayPathUnavailable = 0x0A\n GatewayNoResponse = 0x0B\n\n @classmethod\n def decode(cls, code):\n \"\"\"Give an error code, translate it to a string error name.\n\n :param code: The code number to translate\n \"\"\"\n values = {\n v: k\n for k, v in iter(cls.__dict__.items())\n if not k.startswith(\"__\") and not callable(v)\n }\n return values.get(code, None)\n\n\nclass ExceptionResponse(ModbusResponse):\n \"\"\"Base class for a modbus exception PDU.\"\"\"\n\n ExceptionOffset = 0x80\n _rtu_frame_size = 5\n\n def __init__(self, function_code, exception_code=None, **kwargs):\n \"\"\"Initialize the modbus exception response.\n\n :param function_code: The function to build an exception response for\n :param exception_code: The specific modbus exception to return\n \"\"\"\n super().__init__(**kwargs)\n self.original_code = function_code\n self.function_code = function_code | self.ExceptionOffset\n self.exception_code = exception_code\n\n def encode(self):\n \"\"\"Encode a modbus exception response.\n\n :returns: The encoded exception packet\n \"\"\"\n return struct.pack(\">B\", self.exception_code)\n\n def decode(self, data):\n \"\"\"Decode a modbus exception response.\n\n :param data: The packet data to decode\n \"\"\"\n self.exception_code = int(data[0])\n\n def __str__(self):\n \"\"\"Build a representation of an exception response.\n\n :returns: The string representation of an exception response\n \"\"\"\n message = ModbusExceptions.decode(self.exception_code)\n parameters = (self.function_code, self.original_code, message)\n return (\n \"Exception Response(%d, %d, %s)\" # pylint: disable=consider-using-f-string\n % parameters\n )\n\n\nclass IllegalFunctionRequest(ModbusRequest):\n \"\"\"Define the Modbus slave exception type \"Illegal Function\".\n\n This exception code is returned if the slave::\n\n - does not implement the function code **or**\n - is not in a state that allows it to process the function\n \"\"\"\n\n ErrorCode = 1\n\n def __init__(self, function_code, **kwargs):\n \"\"\"Initialize a IllegalFunctionRequest.\n\n :param function_code: The function we are erroring on\n \"\"\"\n super().__init__(**kwargs)\n self.function_code = function_code\n\n def decode(self, _data):\n \"\"\"Decode so this failure will run correctly.\"\"\"\n\n def execute(self, _context):\n \"\"\"Build an illegal function request error response.\n\n :returns: The error 
response packet\n \"\"\"\n return ExceptionResponse(self.function_code, self.ErrorCode)\n", "path": "pymodbus/pdu.py"}]} | 2,806 | 130 |
gh_patches_debug_67495 | rasdani/github-patches | git_diff | vllm-project__vllm-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RayWorker import error
When importing `vllm` I got the following error
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/code/vllm/vllm/__init__.py", line 4, in <module>
from vllm.engine.async_llm_engine import AsyncLLMEngine
File "/code/vllm/vllm/engine/async_llm_engine.py", line 7, in <module>
from vllm.engine.llm_engine import LLMEngine
File "/code/vllm/vllm/engine/llm_engine.py", line 9, in <module>
from vllm.engine.ray_utils import initialize_cluster, ray, RayWorker
ImportError: cannot import name 'RayWorker' from 'vllm.engine.ray_utils'
```
It seems `ray` requires `pandas`, which I haven't installed.
--- END ISSUE ---
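A minimal, self-contained sketch (not vllm code; the module and names are illustrative) of the import pattern behind this traceback: every name defined inside the `try:` block, including the class, vanishes when the optional dependency fails to import, so a later `from ... import RayWorker` raises the ImportError shown above unless the `except` branch also binds that name, which is what the patch at the end of this record does:

```python
# hypothetical_ray_utils.py -- mimics the structure of vllm/engine/ray_utils.py
try:
    import ray  # can fail, e.g. if ray or one of its dependencies (such as pandas) is absent

    class RayWorker:
        """Defined only when the import above succeeds."""

except ImportError:
    ray = None
    # Without 'RayWorker = None' here the module never defines RayWorker,
    # so 'from hypothetical_ray_utils import RayWorker' raises ImportError.
```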
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/engine/ray_utils.py`
Content:
```
1 import socket
2 from typing import Optional, Tuple, TYPE_CHECKING
3
4 from vllm.config import ParallelConfig
5
6 try:
7 import ray
8 from ray.air.util.torch_dist import TorchDistributedWorker
9
10 class RayWorker(TorchDistributedWorker):
11 """Ray wrapper for vllm.worker.Worker, allowing Worker to be
12 lazliy initialized after Ray sets CUDA_VISIBLE_DEVICES."""
13
14 def __init__(self) -> None:
15 self.worker = None
16
17 def init_worker(self, worker_init_fn):
18 self.worker = worker_init_fn()
19
20 def __getattr__(self, name):
21 return getattr(self.worker, name)
22
23 def execute_method(self, method, *args, **kwargs):
24 executor = getattr(self, method)
25 return executor(*args, **kwargs)
26
27 except ImportError:
28 ray = None
29 TorchDistributedWorker = None
30
31 if TYPE_CHECKING:
32 from ray.util.placement_group import PlacementGroup
33
34
35 def get_open_port():
36 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
37 s.bind(("", 0))
38 return s.getsockname()[1]
39
40
41 def initialize_cluster(
42 parallel_config: ParallelConfig,
43 engine_use_ray: bool = False,
44 ray_address: Optional[str] = None,
45 ) -> Tuple[str, Optional["PlacementGroup"]]:
46 """Initialize the distributed cluster probably with Ray.
47
48 Args:
49 parallel_config: The configurations for parallel execution.
50 engine_use_ray: Whether to use Ray for async engine.
51 ray_address: The address of the Ray cluster. If None, uses
52 the default Ray cluster address.
53
54 Returns:
55 A tuple of (`distributed_init_method`, `all_stage_devices`). The
56 `distributed_init_method` is the address for initializing the
57 distributed backend. `all_stage_devices` includes device IDs for
58 each worker in each pipeline stage. Each device ID is a tuple of
59 (rank, node resource, device id).
60 """
61 if parallel_config.worker_use_ray or engine_use_ray:
62 if ray is None:
63 raise ImportError(
64 "Ray is not installed. Please install Ray to use distributed "
65 "serving.")
66 # Connect to a ray cluster.
67 ray.init(address=ray_address, ignore_reinit_error=True)
68
69 if not parallel_config.worker_use_ray:
70 # Initialize cluster locally.
71 port = get_open_port()
72 # We need to setup the distributed init method to make sure
73 # the distributed megatron code (e.g., get world size) works correctly.
74 distributed_init_method = f"tcp://localhost:{port}"
75 return distributed_init_method, None
76
77 current_placement_group = ray.util.get_current_placement_group()
78 if current_placement_group:
79 # We are in a placement group
80 bundles = current_placement_group.bundle_specs
81 # Verify that we can use the placement group.
82 gpu_bundles = 0
83 for bundle in bundles:
84 bundle_gpus = bundle.get("GPU", 0)
85 if bundle_gpus > 1:
86 raise ValueError(
87 "Placement group bundle cannot have more than 1 GPU.")
88 if bundle_gpus:
89 gpu_bundles += 1
90 if parallel_config.world_size > gpu_bundles:
91 raise ValueError(
92 "The number of required GPUs exceeds the total number of "
93 "available GPUs in the placement group.")
94 else:
95 num_gpus_in_cluster = ray.cluster_resources().get("GPU", 0)
96 if parallel_config.world_size > num_gpus_in_cluster:
97 raise ValueError(
98 "The number of required GPUs exceeds the total number of "
99 "available GPUs in the cluster.")
100 # Create a new placement group
101 current_placement_group = ray.util.placement_group([{
102 "GPU": 1
103 }] * parallel_config.world_size)
104 # Wait until PG is ready - this will block until all
105 # requested resources are available, and will timeout
106 # if they cannot be provisioned.
107 ray.get(current_placement_group.ready(), timeout=1800)
108
109 return None, current_placement_group
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vllm/engine/ray_utils.py b/vllm/engine/ray_utils.py
--- a/vllm/engine/ray_utils.py
+++ b/vllm/engine/ray_utils.py
@@ -27,6 +27,7 @@
except ImportError:
ray = None
TorchDistributedWorker = None
+ RayWorker = None # pylint: disable=invalid-name
if TYPE_CHECKING:
from ray.util.placement_group import PlacementGroup
| {"golden_diff": "diff --git a/vllm/engine/ray_utils.py b/vllm/engine/ray_utils.py\n--- a/vllm/engine/ray_utils.py\n+++ b/vllm/engine/ray_utils.py\n@@ -27,6 +27,7 @@\n except ImportError:\n ray = None\n TorchDistributedWorker = None\n+ RayWorker = None # pylint: disable=invalid-name\n \n if TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n", "issue": "RayWorker import error\nWhen import `vllm` I got following error\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/code/vllm/vllm/__init__.py\", line 4, in <module>\r\n from vllm.engine.async_llm_engine import AsyncLLMEngine\r\n File \"/code/vllm/vllm/engine/async_llm_engine.py\", line 7, in <module>\r\n from vllm.engine.llm_engine import LLMEngine\r\n File \"/code/vllm/vllm/engine/llm_engine.py\", line 9, in <module>\r\n from vllm.engine.ray_utils import initialize_cluster, ray, RayWorker\r\nImportError: cannot import name 'RayWorker' from 'vllm.engine.ray_utils' \r\n```\r\nIt seems `ray` requires `pandas` I haven't installed it.\r\n\n", "before_files": [{"content": "import socket\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nfrom vllm.config import ParallelConfig\n\ntry:\n import ray\n from ray.air.util.torch_dist import TorchDistributedWorker\n\n class RayWorker(TorchDistributedWorker):\n \"\"\"Ray wrapper for vllm.worker.Worker, allowing Worker to be\n lazliy initialized after Ray sets CUDA_VISIBLE_DEVICES.\"\"\"\n\n def __init__(self) -> None:\n self.worker = None\n\n def init_worker(self, worker_init_fn):\n self.worker = worker_init_fn()\n\n def __getattr__(self, name):\n return getattr(self.worker, name)\n\n def execute_method(self, method, *args, **kwargs):\n executor = getattr(self, method)\n return executor(*args, **kwargs)\n\nexcept ImportError:\n ray = None\n TorchDistributedWorker = None\n\nif TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n\n\ndef get_open_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"\", 0))\n return s.getsockname()[1]\n\n\ndef initialize_cluster(\n parallel_config: ParallelConfig,\n engine_use_ray: bool = False,\n ray_address: Optional[str] = None,\n) -> Tuple[str, Optional[\"PlacementGroup\"]]:\n \"\"\"Initialize the distributed cluster probably with Ray.\n\n Args:\n parallel_config: The configurations for parallel execution.\n engine_use_ray: Whether to use Ray for async engine.\n ray_address: The address of the Ray cluster. If None, uses\n the default Ray cluster address.\n\n Returns:\n A tuple of (`distributed_init_method`, `all_stage_devices`). The\n `distributed_init_method` is the address for initializing the\n distributed backend. `all_stage_devices` includes device IDs for\n each worker in each pipeline stage. Each device ID is a tuple of\n (rank, node resource, device id).\n \"\"\"\n if parallel_config.worker_use_ray or engine_use_ray:\n if ray is None:\n raise ImportError(\n \"Ray is not installed. 
Please install Ray to use distributed \"\n \"serving.\")\n # Connect to a ray cluster.\n ray.init(address=ray_address, ignore_reinit_error=True)\n\n if not parallel_config.worker_use_ray:\n # Initialize cluster locally.\n port = get_open_port()\n # We need to setup the distributed init method to make sure\n # the distributed megatron code (e.g., get world size) works correctly.\n distributed_init_method = f\"tcp://localhost:{port}\"\n return distributed_init_method, None\n\n current_placement_group = ray.util.get_current_placement_group()\n if current_placement_group:\n # We are in a placement group\n bundles = current_placement_group.bundle_specs\n # Verify that we can use the placement group.\n gpu_bundles = 0\n for bundle in bundles:\n bundle_gpus = bundle.get(\"GPU\", 0)\n if bundle_gpus > 1:\n raise ValueError(\n \"Placement group bundle cannot have more than 1 GPU.\")\n if bundle_gpus:\n gpu_bundles += 1\n if parallel_config.world_size > gpu_bundles:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the placement group.\")\n else:\n num_gpus_in_cluster = ray.cluster_resources().get(\"GPU\", 0)\n if parallel_config.world_size > num_gpus_in_cluster:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the cluster.\")\n # Create a new placement group\n current_placement_group = ray.util.placement_group([{\n \"GPU\": 1\n }] * parallel_config.world_size)\n # Wait until PG is ready - this will block until all\n # requested resources are available, and will timeout\n # if they cannot be provisioned.\n ray.get(current_placement_group.ready(), timeout=1800)\n\n return None, current_placement_group\n", "path": "vllm/engine/ray_utils.py"}], "after_files": [{"content": "import socket\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nfrom vllm.config import ParallelConfig\n\ntry:\n import ray\n from ray.air.util.torch_dist import TorchDistributedWorker\n\n class RayWorker(TorchDistributedWorker):\n \"\"\"Ray wrapper for vllm.worker.Worker, allowing Worker to be\n lazliy initialized after Ray sets CUDA_VISIBLE_DEVICES.\"\"\"\n\n def __init__(self) -> None:\n self.worker = None\n\n def init_worker(self, worker_init_fn):\n self.worker = worker_init_fn()\n\n def __getattr__(self, name):\n return getattr(self.worker, name)\n\n def execute_method(self, method, *args, **kwargs):\n executor = getattr(self, method)\n return executor(*args, **kwargs)\n\nexcept ImportError:\n ray = None\n TorchDistributedWorker = None\n RayWorker = None # pylint: disable=invalid-name\n\nif TYPE_CHECKING:\n from ray.util.placement_group import PlacementGroup\n\n\ndef get_open_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"\", 0))\n return s.getsockname()[1]\n\n\ndef initialize_cluster(\n parallel_config: ParallelConfig,\n engine_use_ray: bool = False,\n ray_address: Optional[str] = None,\n) -> Tuple[str, Optional[\"PlacementGroup\"]]:\n \"\"\"Initialize the distributed cluster probably with Ray.\n\n Args:\n parallel_config: The configurations for parallel execution.\n engine_use_ray: Whether to use Ray for async engine.\n ray_address: The address of the Ray cluster. If None, uses\n the default Ray cluster address.\n\n Returns:\n A tuple of (`distributed_init_method`, `all_stage_devices`). The\n `distributed_init_method` is the address for initializing the\n distributed backend. `all_stage_devices` includes device IDs for\n each worker in each pipeline stage. 
Each device ID is a tuple of\n (rank, node resource, device id).\n \"\"\"\n if parallel_config.worker_use_ray or engine_use_ray:\n if ray is None:\n raise ImportError(\n \"Ray is not installed. Please install Ray to use distributed \"\n \"serving.\")\n # Connect to a ray cluster.\n ray.init(address=ray_address, ignore_reinit_error=True)\n\n if not parallel_config.worker_use_ray:\n # Initialize cluster locally.\n port = get_open_port()\n # We need to setup the distributed init method to make sure\n # the distributed megatron code (e.g., get world size) works correctly.\n distributed_init_method = f\"tcp://localhost:{port}\"\n return distributed_init_method, None\n\n current_placement_group = ray.util.get_current_placement_group()\n if current_placement_group:\n # We are in a placement group\n bundles = current_placement_group.bundle_specs\n # Verify that we can use the placement group.\n gpu_bundles = 0\n for bundle in bundles:\n bundle_gpus = bundle.get(\"GPU\", 0)\n if bundle_gpus > 1:\n raise ValueError(\n \"Placement group bundle cannot have more than 1 GPU.\")\n if bundle_gpus:\n gpu_bundles += 1\n if parallel_config.world_size > gpu_bundles:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the placement group.\")\n else:\n num_gpus_in_cluster = ray.cluster_resources().get(\"GPU\", 0)\n if parallel_config.world_size > num_gpus_in_cluster:\n raise ValueError(\n \"The number of required GPUs exceeds the total number of \"\n \"available GPUs in the cluster.\")\n # Create a new placement group\n current_placement_group = ray.util.placement_group([{\n \"GPU\": 1\n }] * parallel_config.world_size)\n # Wait until PG is ready - this will block until all\n # requested resources are available, and will timeout\n # if they cannot be provisioned.\n ray.get(current_placement_group.ready(), timeout=1800)\n\n return None, current_placement_group\n", "path": "vllm/engine/ray_utils.py"}]} | 1,552 | 103 |
gh_patches_debug_14593 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-703 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create classes to represent ionization state distributions
My plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help make access to the ionization states more straightforward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability.
Eventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352.
This will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway.
--- END ISSUE ---
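Since the issue is mostly about the class interface (dunder methods such as `__getitem__` and `__call__`), here is a minimal, hypothetical sketch of what such an interface could look like. The class name, attribute names, and normalization behaviour are assumptions for illustration only, not the API that was eventually merged.

```python
# Hypothetical sketch of an ionization-state container with dict/array-style
# access; names and conventions here are illustrative assumptions only.
import numpy as np


class IonizationState:
    def __init__(self, element: str, fractions):
        self.element = element
        # Ionic fractions for charge states 0..Z; expected to sum to ~1.
        self.fractions = np.asarray(fractions, dtype=float)

    def __getitem__(self, charge: int) -> float:
        # e.g. state[1] -> fraction of singly ionized atoms
        return self.fractions[charge]

    def __call__(self, charge: int) -> float:
        # Allow state(1) as an alternative spelling of state[1]
        return self.fractions[charge]

    def __len__(self) -> int:
        return len(self.fractions)


# Usage sketch:
he_states = IonizationState("He", [0.1, 0.3, 0.6])
assert he_states[2] == he_states(2) == 0.6
```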
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/classes/sources/plasma3d.py`
Content:
```
1 """
2 Defines the core Plasma class used by PlasmaPy to represent plasma properties.
3 """
4
5 import numpy as np
6 import astropy.units as u
7 import itertools
8
9 from plasmapy.formulary.magnetostatics import MagnetoStatics
10
11 from astropy.constants import mu0
12
13 from plasmapy.classes import GenericPlasma
14
15 __all__ = [
16 "Plasma3D"
17 ]
18
19
20 class Plasma3D(GenericPlasma):
21 """
22 Core class for describing and calculating plasma parameters with
23 spatial dimensions.
24
25 Attributes
26 ----------
27 x : `astropy.units.Quantity`
28 x-coordinates within the plasma domain. Equal to the
29 `domain_x` input parameter.
30 y : `astropy.units.Quantity`
31 y-coordinates within the plasma domain. Equal to the
32 `domain_y` input parameter.
33 z : `astropy.units.Quantity`
34 z-coordinates within the plasma domain. Equal to the
35 `domain_z` input parameter.
36 grid : `astropy.units.Quantity`
37 (3, x, y, z) array containing the values of each coordinate at
38 every point in the domain.
39 domain_shape : tuple
40 Shape of the plasma domain.
41 density : `astropy.units.Quantity`
42 (x, y, z) array of mass density at every point in the domain.
43 momentum : `astropy.units.Quantity`
44 (3, x, y, z) array of the momentum vector at every point in
45 the domain.
46 pressure : `astropy.units.Quantity`
47 (x, y, z) array of pressure at every point in the domain.
48 magnetic_field : `astropy.units.Quantity`
49 (3, x, y, z) array of the magnetic field vector at every point
50 in the domain.
51
52 Parameters
53 ----------
54 domain_x : `astropy.units.Quantity`
55 1D array of x-coordinates for the plasma domain. Must have
56 units convertable to length.
57 domain_y : `astropy.units.Quantity`
58 1D array of y-coordinates for the plasma domain. Must have
59 units convertable to length.
60 domain_z : `astropy.units.Quantity`
61 1D array of z-coordinates for the plasma domain. Must have
62 units convertable to length.
63
64 """
65 @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m)
66 def __init__(self, domain_x, domain_y, domain_z):
67 # Define domain sizes
68 self.x = domain_x
69 self.y = domain_y
70 self.z = domain_z
71
72 self.grid = np.array(np.meshgrid(self.x, self.y, self.z,
73 indexing='ij'))
74 self.domain_shape = (len(self.x), len(self.y), len(self.z))
75
76 # Initiate core plasma variables
77 self.density = np.zeros(self.domain_shape) * u.kg / u.m**3
78 self.momentum = np.zeros((3, *self.domain_shape)) * u.kg / (u.m ** 2 * u.s)
79 self.pressure = np.zeros(self.domain_shape) * u.Pa
80 self.magnetic_field = np.zeros((3, *self.domain_shape)) * u.T
81 self.electric_field = np.zeros((3, *self.domain_shape)) * u.V / u.m
82
83 @property
84 def velocity(self):
85 return self.momentum / self.density
86
87 @property
88 def magnetic_field_strength(self):
89 B = self.magnetic_field
90 return np.sqrt(np.sum(B * B, axis=0))
91
92 @property
93 def electric_field_strength(self):
94 E = self.electric_field
95 return np.sqrt(np.sum(E * E, axis=0))
96
97 @property
98 def alfven_speed(self):
99 B = self.magnetic_field
100 rho = self.density
101 return np.sqrt(np.sum(B * B, axis=0) / (mu0 * rho))
102
103 @classmethod
104 def is_datasource_for(cls, **kwargs):
105 if len(kwargs) == 3:
106 match = all(f'domain_{direction}' in kwargs.keys() for direction in 'xyz')
107 else:
108 match = False
109 return match
110
111 def add_magnetostatic(self, *mstats: MagnetoStatics):
112 # for each MagnetoStatic argument
113 for mstat in mstats:
114 # loop over 3D-index (ix,iy,iz)
115 for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):
116 # get coordinate
117 p = self.grid[(slice(None),)+point_index] # function as [:, *index]
118 # calculate magnetic field at this point and add back
119 self.magnetic_field[(slice(None),)+point_index] += mstat.magnetic_field(p)
120
```
--- END FILES ---
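Because the file above is mostly definitions, a short usage sketch may help: it instantiates `Plasma3D` on a tiny domain and reads one derived property. The grid size and field values are arbitrary assumptions for illustration, and the import path assumes the module layout shown above.

```python
# Usage sketch for the Plasma3D class defined above (tiny 2x2x2 domain).
import astropy.units as u
import numpy as np

from plasmapy.classes.sources.plasma3d import Plasma3D

domain = np.linspace(0, 1, 2) * u.m
plasma = Plasma3D(domain_x=domain, domain_y=domain, domain_z=domain)

# Fill in a uniform density and a z-directed magnetic field, then read a
# derived quantity computed by the class.
plasma.density[...] = 1e-12 * u.kg / u.m**3
plasma.magnetic_field[2, ...] = 1e-3 * u.T
print(plasma.alfven_speed.to(u.km / u.s))
```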
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/classes/sources/plasma3d.py b/plasmapy/classes/sources/plasma3d.py
--- a/plasmapy/classes/sources/plasma3d.py
+++ b/plasmapy/classes/sources/plasma3d.py
@@ -110,9 +110,10 @@
def add_magnetostatic(self, *mstats: MagnetoStatics):
# for each MagnetoStatic argument
+ prod = itertools.product(*[list(range(n)) for n in self.domain_shape])
for mstat in mstats:
# loop over 3D-index (ix,iy,iz)
- for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):
+ for point_index in prod:
# get coordinate
p = self.grid[(slice(None),)+point_index] # function as [:, *index]
# calculate magnetic field at this point and add back
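One detail worth flagging when reading this diff: `itertools.product` returns a one-shot iterator, so hoisting it out of the `for mstat` loop means it is exhausted after the first magnetostatic object. The sketch below illustrates a reuse-safe variant that materializes the indices as a list; it is an illustration of the pattern only, not the patch that was applied.

```python
# Sketch: materialize the 3D point indices once as a list so they can be
# iterated again for every MagnetoStatics argument (itertools.product alone
# yields a one-shot iterator).
import itertools

domain_shape = (2, 2, 2)  # assumed shape for illustration
point_indices = list(itertools.product(*[range(n) for n in domain_shape]))

for mstat in ("mstat_a", "mstat_b"):  # stand-ins for MagnetoStatics objects
    for point_index in point_indices:  # reusable because it is a list
        pass  # compute and accumulate the field contribution at this point
```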
| {"golden_diff": "diff --git a/plasmapy/classes/sources/plasma3d.py b/plasmapy/classes/sources/plasma3d.py\n--- a/plasmapy/classes/sources/plasma3d.py\n+++ b/plasmapy/classes/sources/plasma3d.py\n@@ -110,9 +110,10 @@\n \n def add_magnetostatic(self, *mstats: MagnetoStatics):\n # for each MagnetoStatic argument\n+ prod = itertools.product(*[list(range(n)) for n in self.domain_shape])\n for mstat in mstats:\n # loop over 3D-index (ix,iy,iz)\n- for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):\n+ for point_index in prod:\n # get coordinate\n p = self.grid[(slice(None),)+point_index] # function as [:, *index]\n # calculate magnetic field at this point and add back\n", "issue": "Create classes to represent ionization state distributions\nMy plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. \r\n\r\nEventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352.\r\n\r\nThis will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nDefines the core Plasma class used by PlasmaPy to represent plasma properties.\n\"\"\"\n\nimport numpy as np\nimport astropy.units as u\nimport itertools\n\nfrom plasmapy.formulary.magnetostatics import MagnetoStatics\n\nfrom astropy.constants import mu0\n\nfrom plasmapy.classes import GenericPlasma\n\n__all__ = [\n \"Plasma3D\"\n]\n\n\nclass Plasma3D(GenericPlasma):\n \"\"\"\n Core class for describing and calculating plasma parameters with\n spatial dimensions.\n\n Attributes\n ----------\n x : `astropy.units.Quantity`\n x-coordinates within the plasma domain. Equal to the\n `domain_x` input parameter.\n y : `astropy.units.Quantity`\n y-coordinates within the plasma domain. Equal to the\n `domain_y` input parameter.\n z : `astropy.units.Quantity`\n z-coordinates within the plasma domain. Equal to the\n `domain_z` input parameter.\n grid : `astropy.units.Quantity`\n (3, x, y, z) array containing the values of each coordinate at\n every point in the domain.\n domain_shape : tuple\n Shape of the plasma domain.\n density : `astropy.units.Quantity`\n (x, y, z) array of mass density at every point in the domain.\n momentum : `astropy.units.Quantity`\n (3, x, y, z) array of the momentum vector at every point in\n the domain.\n pressure : `astropy.units.Quantity`\n (x, y, z) array of pressure at every point in the domain.\n magnetic_field : `astropy.units.Quantity`\n (3, x, y, z) array of the magnetic field vector at every point\n in the domain.\n\n Parameters\n ----------\n domain_x : `astropy.units.Quantity`\n 1D array of x-coordinates for the plasma domain. Must have\n units convertable to length.\n domain_y : `astropy.units.Quantity`\n 1D array of y-coordinates for the plasma domain. Must have\n units convertable to length.\n domain_z : `astropy.units.Quantity`\n 1D array of z-coordinates for the plasma domain. 
Must have\n units convertable to length.\n\n \"\"\"\n @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m)\n def __init__(self, domain_x, domain_y, domain_z):\n # Define domain sizes\n self.x = domain_x\n self.y = domain_y\n self.z = domain_z\n\n self.grid = np.array(np.meshgrid(self.x, self.y, self.z,\n indexing='ij'))\n self.domain_shape = (len(self.x), len(self.y), len(self.z))\n\n # Initiate core plasma variables\n self.density = np.zeros(self.domain_shape) * u.kg / u.m**3\n self.momentum = np.zeros((3, *self.domain_shape)) * u.kg / (u.m ** 2 * u.s)\n self.pressure = np.zeros(self.domain_shape) * u.Pa\n self.magnetic_field = np.zeros((3, *self.domain_shape)) * u.T\n self.electric_field = np.zeros((3, *self.domain_shape)) * u.V / u.m\n\n @property\n def velocity(self):\n return self.momentum / self.density\n\n @property\n def magnetic_field_strength(self):\n B = self.magnetic_field\n return np.sqrt(np.sum(B * B, axis=0))\n\n @property\n def electric_field_strength(self):\n E = self.electric_field\n return np.sqrt(np.sum(E * E, axis=0))\n\n @property\n def alfven_speed(self):\n B = self.magnetic_field\n rho = self.density\n return np.sqrt(np.sum(B * B, axis=0) / (mu0 * rho))\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if len(kwargs) == 3:\n match = all(f'domain_{direction}' in kwargs.keys() for direction in 'xyz')\n else:\n match = False\n return match\n\n def add_magnetostatic(self, *mstats: MagnetoStatics):\n # for each MagnetoStatic argument\n for mstat in mstats:\n # loop over 3D-index (ix,iy,iz)\n for point_index in itertools.product(*[list(range(n)) for n in self.domain_shape]):\n # get coordinate\n p = self.grid[(slice(None),)+point_index] # function as [:, *index]\n # calculate magnetic field at this point and add back\n self.magnetic_field[(slice(None),)+point_index] += mstat.magnetic_field(p)\n", "path": "plasmapy/classes/sources/plasma3d.py"}], "after_files": [{"content": "\"\"\"\nDefines the core Plasma class used by PlasmaPy to represent plasma properties.\n\"\"\"\n\nimport numpy as np\nimport astropy.units as u\nimport itertools\n\nfrom plasmapy.formulary.magnetostatics import MagnetoStatics\n\nfrom astropy.constants import mu0\n\nfrom plasmapy.classes import GenericPlasma\n\n__all__ = [\n \"Plasma3D\"\n]\n\n\nclass Plasma3D(GenericPlasma):\n \"\"\"\n Core class for describing and calculating plasma parameters with\n spatial dimensions.\n\n Attributes\n ----------\n x : `astropy.units.Quantity`\n x-coordinates within the plasma domain. Equal to the\n `domain_x` input parameter.\n y : `astropy.units.Quantity`\n y-coordinates within the plasma domain. Equal to the\n `domain_y` input parameter.\n z : `astropy.units.Quantity`\n z-coordinates within the plasma domain. Equal to the\n `domain_z` input parameter.\n grid : `astropy.units.Quantity`\n (3, x, y, z) array containing the values of each coordinate at\n every point in the domain.\n domain_shape : tuple\n Shape of the plasma domain.\n density : `astropy.units.Quantity`\n (x, y, z) array of mass density at every point in the domain.\n momentum : `astropy.units.Quantity`\n (3, x, y, z) array of the momentum vector at every point in\n the domain.\n pressure : `astropy.units.Quantity`\n (x, y, z) array of pressure at every point in the domain.\n magnetic_field : `astropy.units.Quantity`\n (3, x, y, z) array of the magnetic field vector at every point\n in the domain.\n\n Parameters\n ----------\n domain_x : `astropy.units.Quantity`\n 1D array of x-coordinates for the plasma domain. 
Must have\n units convertable to length.\n domain_y : `astropy.units.Quantity`\n 1D array of y-coordinates for the plasma domain. Must have\n units convertable to length.\n domain_z : `astropy.units.Quantity`\n 1D array of z-coordinates for the plasma domain. Must have\n units convertable to length.\n\n \"\"\"\n @u.quantity_input(domain_x=u.m, domain_y=u.m, domain_z=u.m)\n def __init__(self, domain_x, domain_y, domain_z):\n # Define domain sizes\n self.x = domain_x\n self.y = domain_y\n self.z = domain_z\n\n self.grid = np.array(np.meshgrid(self.x, self.y, self.z,\n indexing='ij'))\n self.domain_shape = (len(self.x), len(self.y), len(self.z))\n\n # Initiate core plasma variables\n self.density = np.zeros(self.domain_shape) * u.kg / u.m**3\n self.momentum = np.zeros((3, *self.domain_shape)) * u.kg / (u.m ** 2 * u.s)\n self.pressure = np.zeros(self.domain_shape) * u.Pa\n self.magnetic_field = np.zeros((3, *self.domain_shape)) * u.T\n self.electric_field = np.zeros((3, *self.domain_shape)) * u.V / u.m\n\n @property\n def velocity(self):\n return self.momentum / self.density\n\n @property\n def magnetic_field_strength(self):\n B = self.magnetic_field\n return np.sqrt(np.sum(B * B, axis=0))\n\n @property\n def electric_field_strength(self):\n E = self.electric_field\n return np.sqrt(np.sum(E * E, axis=0))\n\n @property\n def alfven_speed(self):\n B = self.magnetic_field\n rho = self.density\n return np.sqrt(np.sum(B * B, axis=0) / (mu0 * rho))\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if len(kwargs) == 3:\n match = all(f'domain_{direction}' in kwargs.keys() for direction in 'xyz')\n else:\n match = False\n return match\n\n def add_magnetostatic(self, *mstats: MagnetoStatics):\n # for each MagnetoStatic argument\n prod = itertools.product(*[list(range(n)) for n in self.domain_shape])\n for mstat in mstats:\n # loop over 3D-index (ix,iy,iz)\n for point_index in prod:\n # get coordinate\n p = self.grid[(slice(None),)+point_index] # function as [:, *index]\n # calculate magnetic field at this point and add back\n self.magnetic_field[(slice(None),)+point_index] += mstat.magnetic_field(p)\n", "path": "plasmapy/classes/sources/plasma3d.py"}]} | 1,750 | 208 |
gh_patches_debug_1994 | rasdani/github-patches | git_diff | frappe__hrms-1526 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organizational Chart: Total connections includes employees left
### Information about bug
<img width="329" alt="Screenshot 2024-03-08 at 11 20 37 AM" src="https://github.com/frappe/hrms/assets/20027965/b88248f8-502e-41fa-ba1a-87c0cd43165a">
The current system displays a total count of connections for each employee, including those who are no longer with the company. However, when viewing the connections, only active employees are shown.
**Expected Output:**
The count should reflect only active employees, ensuring consistency between the number displayed and the individuals visible upon selecting any employee.
### Module
HR
### Version
ERPNext: v14.x.x-develop () (develop)
Frappe Framework: v15.x.x-develop () (develop)
Frappe HR: v16.0.0-dev (develop)
### Installation method
manual install
### Relevant log output / Stack trace / Full Error Message.
_No response_
### Code of Conduct
- [x] I agree to follow this project's Code of Conduct
--- END ISSUE ---
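The fix the issue asks for boils down to constraining the connection count to active employees. A minimal sketch of that constraint, written in the same query-builder style as the file below, is shown here; it is illustrative only, assumes the `Employee` doctype fields used in the repository file, and needs a Frappe site context to actually run.

```python
# Sketch: count only active employees inside the reports-to subtree
# (lft/rgt nested-set bounds), mirroring the query style used in the file below.
import frappe
from frappe.query_builder.functions import Count


def count_active_connections(lft: int, rgt: int) -> int:
    Employee = frappe.qb.DocType("Employee")
    result = (
        frappe.qb.from_(Employee)
        .select(Count(Employee.name))
        .where(
            (Employee.lft > lft)
            & (Employee.rgt < rgt)
            & (Employee.status == "Active")
        )
    ).run()
    return result[0][0]
```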
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hrms/hr/page/organizational_chart/organizational_chart.py`
Content:
```
1 import frappe
2 from frappe.query_builder.functions import Count
3
4
5 @frappe.whitelist()
6 def get_children(parent=None, company=None, exclude_node=None):
7 filters = [["status", "=", "Active"]]
8 if company and company != "All Companies":
9 filters.append(["company", "=", company])
10
11 if parent and company and parent != company:
12 filters.append(["reports_to", "=", parent])
13 else:
14 filters.append(["reports_to", "=", ""])
15
16 if exclude_node:
17 filters.append(["name", "!=", exclude_node])
18
19 employees = frappe.get_all(
20 "Employee",
21 fields=[
22 "employee_name as name",
23 "name as id",
24 "lft",
25 "rgt",
26 "reports_to",
27 "image",
28 "designation as title",
29 ],
30 filters=filters,
31 order_by="name",
32 )
33
34 for employee in employees:
35 employee.connections = get_connections(employee.id, employee.lft, employee.rgt)
36 employee.expandable = bool(employee.connections)
37
38 return employees
39
40
41 def get_connections(employee: str, lft: int, rgt: int) -> int:
42 Employee = frappe.qb.DocType("Employee")
43 query = (
44 frappe.qb.from_(Employee)
45 .select(Count(Employee.name))
46 .where((Employee.lft > lft) & (Employee.rgt < rgt))
47 ).run()
48
49 return query[0][0]
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hrms/hr/page/organizational_chart/organizational_chart.py b/hrms/hr/page/organizational_chart/organizational_chart.py
--- a/hrms/hr/page/organizational_chart/organizational_chart.py
+++ b/hrms/hr/page/organizational_chart/organizational_chart.py
@@ -43,7 +43,7 @@
query = (
frappe.qb.from_(Employee)
.select(Count(Employee.name))
- .where((Employee.lft > lft) & (Employee.rgt < rgt))
+ .where((Employee.lft > lft) & (Employee.rgt < rgt) & (Employee.status == "Active"))
).run()
return query[0][0]
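For completeness, an equivalent count can also be expressed with Frappe's higher-level filter API; this is a hedged alternative sketch, not part of the applied patch, and it assumes the same `Employee` fields as above.

```python
# Alternative sketch using Frappe's filter-based count helper.
import frappe


def count_active_connections(lft: int, rgt: int) -> int:
    return frappe.db.count(
        "Employee",
        filters={"lft": [">", lft], "rgt": ["<", rgt], "status": "Active"},
    )
```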
| {"golden_diff": "diff --git a/hrms/hr/page/organizational_chart/organizational_chart.py b/hrms/hr/page/organizational_chart/organizational_chart.py\n--- a/hrms/hr/page/organizational_chart/organizational_chart.py\n+++ b/hrms/hr/page/organizational_chart/organizational_chart.py\n@@ -43,7 +43,7 @@\n \tquery = (\n \t\tfrappe.qb.from_(Employee)\n \t\t.select(Count(Employee.name))\n-\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt))\n+\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt) & (Employee.status == \"Active\"))\n \t).run()\n \n \treturn query[0][0]\n", "issue": "Organizational Chart: Total connections includes employees left\n### Information about bug\n\n<img width=\"329\" alt=\"Screenshot 2024-03-08 at 11 20 37\u202fAM\" src=\"https://github.com/frappe/hrms/assets/20027965/b88248f8-502e-41fa-ba1a-87c0cd43165a\">\r\n\r\nThe current system displays a total count of connections for each employee, including those who are no longer with the company. However, when viewing the connections, only active employees are shown.\r\n\r\n**Expected Output:**\r\nThe count now reflects only active employees, ensuring consistency between the number displayed and the individuals visible upon selecting any employee.\n\n### Module\n\nHR\n\n### Version\n\nERPNext: v14.x.x-develop () (develop)\r\n\r\nFrappe Framework: v15.x.x-develop () (develop)\r\n\r\nFrappe HR: v16.0.0-dev (develop)\n\n### Installation method\n\nmanual install\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n\n### Code of Conduct\n\n- [x] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "import frappe\nfrom frappe.query_builder.functions import Count\n\n\[email protected]()\ndef get_children(parent=None, company=None, exclude_node=None):\n\tfilters = [[\"status\", \"=\", \"Active\"]]\n\tif company and company != \"All Companies\":\n\t\tfilters.append([\"company\", \"=\", company])\n\n\tif parent and company and parent != company:\n\t\tfilters.append([\"reports_to\", \"=\", parent])\n\telse:\n\t\tfilters.append([\"reports_to\", \"=\", \"\"])\n\n\tif exclude_node:\n\t\tfilters.append([\"name\", \"!=\", exclude_node])\n\n\temployees = frappe.get_all(\n\t\t\"Employee\",\n\t\tfields=[\n\t\t\t\"employee_name as name\",\n\t\t\t\"name as id\",\n\t\t\t\"lft\",\n\t\t\t\"rgt\",\n\t\t\t\"reports_to\",\n\t\t\t\"image\",\n\t\t\t\"designation as title\",\n\t\t],\n\t\tfilters=filters,\n\t\torder_by=\"name\",\n\t)\n\n\tfor employee in employees:\n\t\temployee.connections = get_connections(employee.id, employee.lft, employee.rgt)\n\t\temployee.expandable = bool(employee.connections)\n\n\treturn employees\n\n\ndef get_connections(employee: str, lft: int, rgt: int) -> int:\n\tEmployee = frappe.qb.DocType(\"Employee\")\n\tquery = (\n\t\tfrappe.qb.from_(Employee)\n\t\t.select(Count(Employee.name))\n\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt))\n\t).run()\n\n\treturn query[0][0]\n", "path": "hrms/hr/page/organizational_chart/organizational_chart.py"}], "after_files": [{"content": "import frappe\nfrom frappe.query_builder.functions import Count\n\n\[email protected]()\ndef get_children(parent=None, company=None, exclude_node=None):\n\tfilters = [[\"status\", \"=\", \"Active\"]]\n\tif company and company != \"All Companies\":\n\t\tfilters.append([\"company\", \"=\", company])\n\n\tif parent and company and parent != company:\n\t\tfilters.append([\"reports_to\", \"=\", parent])\n\telse:\n\t\tfilters.append([\"reports_to\", \"=\", \"\"])\n\n\tif 
exclude_node:\n\t\tfilters.append([\"name\", \"!=\", exclude_node])\n\n\temployees = frappe.get_all(\n\t\t\"Employee\",\n\t\tfields=[\n\t\t\t\"employee_name as name\",\n\t\t\t\"name as id\",\n\t\t\t\"lft\",\n\t\t\t\"rgt\",\n\t\t\t\"reports_to\",\n\t\t\t\"image\",\n\t\t\t\"designation as title\",\n\t\t],\n\t\tfilters=filters,\n\t\torder_by=\"name\",\n\t)\n\n\tfor employee in employees:\n\t\temployee.connections = get_connections(employee.id, employee.lft, employee.rgt)\n\t\temployee.expandable = bool(employee.connections)\n\n\treturn employees\n\n\ndef get_connections(employee: str, lft: int, rgt: int) -> int:\n\tEmployee = frappe.qb.DocType(\"Employee\")\n\tquery = (\n\t\tfrappe.qb.from_(Employee)\n\t\t.select(Count(Employee.name))\n\t\t.where((Employee.lft > lft) & (Employee.rgt < rgt) & (Employee.status == \"Active\"))\n\t).run()\n\n\treturn query[0][0]\n", "path": "hrms/hr/page/organizational_chart/organizational_chart.py"}]} | 950 | 162 |
gh_patches_debug_18503 | rasdani/github-patches | git_diff | bramstroker__homeassistant-powercalc-699 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dont create an EntityComponent for sensor
Hi Bram,
I've been debugging some difficult-to-find issues in Core recently (more specifically the homeassistant.update_entity service, which errors out), and as it turns out it is caused by Powercalc, more specifically:
```
File "/config/custom_components/powercalc/__init__.py", line 223, in create_domain_groups
component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)
```
as Erik Montnemery put it: This is what powercalc does, and it's 100% wrong. And
> he can't create an EntityComponent for sensor
> that's done automatically by core for any integration which has sensor platforms already
I've just been able to confirm the proper behavior of the homeassistant.update_entity service without Powercalc installed.
This is a very serious flaw in the system, interfering with core performance. I've been forced to take Powercalc out for now to restore core functionality.
Please check that?
Full section in the homeassistant log:
```
2022-04-29 14:40:08 ERROR (MainThread) [custom_components.sensor] EntityComponent() called for sensor domain from
File "/usr/local/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/src/homeassistant/homeassistant/__main__.py", line 203, in <module>
sys.exit(main())
File "/usr/src/homeassistant/homeassistant/__main__.py", line 191, in main
exit_code = runner.run(runtime_conf)
File "/usr/src/homeassistant/homeassistant/runner.py", line 119, in run
return loop.run_until_complete(setup_and_run_hass(runtime_config))
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 629, in run_until_complete
self.run_forever()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 596, in run_forever
self._run_once()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 1890, in _run_once
handle._run()
File "/usr/local/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/config/custom_components/powercalc/__init__.py", line 153, in _create_domain_groups
await create_domain_groups(
File "/config/custom_components/powercalc/__init__.py", line 223, in create_domain_groups
component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)
```
thanks!
--- END ISSUE ---
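The core point of the issue is that a sensor-platform integration should reuse the `EntityComponent` that Home Assistant core already creates for the `sensor` domain instead of instantiating its own. The hedged sketch below shows that reuse pattern; it assumes (as the accepted patch further down in this row does) that core exposes the sensor component via `hass.data[SENSOR_DOMAIN]` once the sensor integration is set up.

```python
# Sketch: reuse core's EntityComponent for the sensor domain rather than
# constructing a new one (which is what the issue flags as incorrect).
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN


async def add_group_entities(hass, entities):
    # Core stores the sensor EntityComponent in hass.data[SENSOR_DOMAIN]
    # once the sensor integration has been set up.
    sensor_component = hass.data[SENSOR_DOMAIN]
    await sensor_component.async_add_entities(entities)
```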
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/powercalc/__init__.py`
Content:
```
1 """The PowerCalc integration."""
2
3 from __future__ import annotations
4
5 import logging
6
7 import homeassistant.helpers.config_validation as cv
8 import homeassistant.helpers.entity_registry as er
9 import voluptuous as vol
10 from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
11 from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
12 from homeassistant.components.utility_meter import DEFAULT_OFFSET, max_28_days
13 from homeassistant.components.utility_meter.const import METER_TYPES
14 from homeassistant.const import (
15 CONF_ENTITY_ID,
16 CONF_SCAN_INTERVAL,
17 CONF_UNIQUE_ID,
18 EVENT_HOMEASSISTANT_STARTED,
19 )
20 from homeassistant.core import callback
21 from homeassistant.helpers import discovery
22 from homeassistant.helpers.entity_component import EntityComponent
23 from homeassistant.helpers.typing import HomeAssistantType
24
25 from .common import create_source_entity, validate_name_pattern
26 from .const import (
27 CONF_CREATE_DOMAIN_GROUPS,
28 CONF_CREATE_ENERGY_SENSORS,
29 CONF_CREATE_UTILITY_METERS,
30 CONF_ENABLE_AUTODISCOVERY,
31 CONF_ENERGY_INTEGRATION_METHOD,
32 CONF_ENERGY_SENSOR_CATEGORY,
33 CONF_ENERGY_SENSOR_NAMING,
34 CONF_ENERGY_SENSOR_PRECISION,
35 CONF_POWER_SENSOR_CATEGORY,
36 CONF_POWER_SENSOR_NAMING,
37 CONF_POWER_SENSOR_PRECISION,
38 CONF_UTILITY_METER_OFFSET,
39 CONF_UTILITY_METER_TARIFFS,
40 CONF_UTILITY_METER_TYPES,
41 DATA_CALCULATOR_FACTORY,
42 DATA_CONFIGURED_ENTITIES,
43 DATA_DISCOVERED_ENTITIES,
44 DATA_DOMAIN_ENTITIES,
45 DEFAULT_ENERGY_INTEGRATION_METHOD,
46 DEFAULT_ENERGY_NAME_PATTERN,
47 DEFAULT_ENERGY_SENSOR_PRECISION,
48 DEFAULT_ENTITY_CATEGORY,
49 DEFAULT_POWER_NAME_PATTERN,
50 DEFAULT_POWER_SENSOR_PRECISION,
51 DEFAULT_SCAN_INTERVAL,
52 DEFAULT_UTILITY_METER_TYPES,
53 DISCOVERY_LIGHT_MODEL,
54 DISCOVERY_SOURCE_ENTITY,
55 DOMAIN,
56 DOMAIN_CONFIG,
57 ENERGY_INTEGRATION_METHODS,
58 ENTITY_CATEGORIES,
59 )
60 from .errors import ModelNotSupported
61 from .model_discovery import get_light_model, is_supported_for_autodiscovery
62 from .sensors.group import create_group_sensors
63 from .strategy.factory import PowerCalculatorStrategyFactory
64
65 CONFIG_SCHEMA = vol.Schema(
66 {
67 DOMAIN: vol.All(
68 vol.Schema(
69 {
70 vol.Optional(
71 CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
72 ): cv.time_period,
73 vol.Optional(
74 CONF_POWER_SENSOR_NAMING, default=DEFAULT_POWER_NAME_PATTERN
75 ): validate_name_pattern,
76 vol.Optional(
77 CONF_POWER_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY
78 ): vol.In(ENTITY_CATEGORIES),
79 vol.Optional(
80 CONF_ENERGY_SENSOR_NAMING, default=DEFAULT_ENERGY_NAME_PATTERN
81 ): validate_name_pattern,
82 vol.Optional(
83 CONF_ENERGY_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY
84 ): vol.In(ENTITY_CATEGORIES),
85 vol.Optional(CONF_ENABLE_AUTODISCOVERY, default=True): cv.boolean,
86 vol.Optional(CONF_CREATE_ENERGY_SENSORS, default=True): cv.boolean,
87 vol.Optional(CONF_CREATE_UTILITY_METERS, default=False): cv.boolean,
88 vol.Optional(CONF_UTILITY_METER_TARIFFS, default=[]): vol.All(
89 cv.ensure_list, [cv.string]
90 ),
91 vol.Optional(
92 CONF_UTILITY_METER_TYPES, default=DEFAULT_UTILITY_METER_TYPES
93 ): vol.All(cv.ensure_list, [vol.In(METER_TYPES)]),
94 vol.Optional(
95 CONF_UTILITY_METER_OFFSET, default=DEFAULT_OFFSET
96 ): vol.All(cv.time_period, cv.positive_timedelta, max_28_days),
97 vol.Optional(
98 CONF_ENERGY_INTEGRATION_METHOD,
99 default=DEFAULT_ENERGY_INTEGRATION_METHOD,
100 ): vol.In(ENERGY_INTEGRATION_METHODS),
101 vol.Optional(
102 CONF_ENERGY_SENSOR_PRECISION,
103 default=DEFAULT_ENERGY_SENSOR_PRECISION,
104 ): cv.positive_int,
105 vol.Optional(
106 CONF_POWER_SENSOR_PRECISION,
107 default=DEFAULT_POWER_SENSOR_PRECISION,
108 ): cv.positive_int,
109 vol.Optional(CONF_CREATE_DOMAIN_GROUPS, default=[]): vol.All(
110 cv.ensure_list, [cv.string]
111 ),
112 }
113 ),
114 )
115 },
116 extra=vol.ALLOW_EXTRA,
117 )
118
119 _LOGGER = logging.getLogger(__name__)
120
121
122 async def async_setup(hass: HomeAssistantType, config: dict) -> bool:
123 domain_config = config.get(DOMAIN) or {
124 CONF_POWER_SENSOR_NAMING: DEFAULT_POWER_NAME_PATTERN,
125 CONF_POWER_SENSOR_PRECISION: DEFAULT_POWER_SENSOR_PRECISION,
126 CONF_POWER_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,
127 CONF_ENERGY_INTEGRATION_METHOD: DEFAULT_ENERGY_INTEGRATION_METHOD,
128 CONF_ENERGY_SENSOR_NAMING: DEFAULT_ENERGY_NAME_PATTERN,
129 CONF_ENERGY_SENSOR_PRECISION: DEFAULT_ENERGY_SENSOR_PRECISION,
130 CONF_ENERGY_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,
131 CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
132 CONF_CREATE_DOMAIN_GROUPS: [],
133 CONF_CREATE_ENERGY_SENSORS: True,
134 CONF_CREATE_UTILITY_METERS: False,
135 CONF_ENABLE_AUTODISCOVERY: True,
136 CONF_UTILITY_METER_OFFSET: DEFAULT_OFFSET,
137 CONF_UTILITY_METER_TYPES: DEFAULT_UTILITY_METER_TYPES,
138 }
139
140 hass.data[DOMAIN] = {
141 DATA_CALCULATOR_FACTORY: PowerCalculatorStrategyFactory(hass),
142 DOMAIN_CONFIG: domain_config,
143 DATA_CONFIGURED_ENTITIES: {},
144 DATA_DOMAIN_ENTITIES: {},
145 DATA_DISCOVERED_ENTITIES: [],
146 }
147
148 await autodiscover_entities(config, domain_config, hass)
149
150 if domain_config.get(CONF_CREATE_DOMAIN_GROUPS):
151
152 async def _create_domain_groups(event: None):
153 await create_domain_groups(
154 hass,
155 domain_config,
156 domain_config.get(CONF_CREATE_DOMAIN_GROUPS),
157 )
158
159 hass.bus.async_listen_once(
160 EVENT_HOMEASSISTANT_STARTED,
161 _create_domain_groups,
162 )
163
164 return True
165
166
167 async def autodiscover_entities(
168 config: dict, domain_config: dict, hass: HomeAssistantType
169 ):
170 """Discover entities supported for powercalc autoconfiguration in HA instance"""
171
172 if not domain_config.get(CONF_ENABLE_AUTODISCOVERY):
173 return
174
175 _LOGGER.debug("Start auto discovering entities")
176 entity_registry = await er.async_get_registry(hass)
177 for entity_entry in list(entity_registry.entities.values()):
178 if entity_entry.disabled:
179 continue
180
181 if entity_entry.domain != LIGHT_DOMAIN:
182 continue
183
184 if not await is_supported_for_autodiscovery(hass, entity_entry):
185 continue
186
187 source_entity = await create_source_entity(entity_entry.entity_id, hass)
188 try:
189 light_model = await get_light_model(hass, {}, source_entity.entity_entry)
190 if not light_model.is_autodiscovery_allowed:
191 _LOGGER.debug(
192 f"{entity_entry.entity_id}: Model found in database, but needs manual configuration"
193 )
194 continue
195 except ModelNotSupported:
196 _LOGGER.debug(
197 "%s: Model not found in library, skipping auto configuration",
198 entity_entry.entity_id,
199 )
200 continue
201
202 if not light_model:
203 continue
204
205 discovery_info = {
206 CONF_ENTITY_ID: entity_entry.entity_id,
207 DISCOVERY_SOURCE_ENTITY: source_entity,
208 DISCOVERY_LIGHT_MODEL: light_model,
209 }
210 hass.async_create_task(
211 discovery.async_load_platform(
212 hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config
213 )
214 )
215
216 _LOGGER.debug("Done auto discovering entities")
217
218
219 async def create_domain_groups(
220 hass: HomeAssistantType, global_config: dict, domains: list[str]
221 ):
222 """Create group sensors aggregating all power sensors from given domains"""
223 component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)
224 sensor_config = global_config.copy()
225 _LOGGER.debug(f"Setting up domain based group sensors..")
226 for domain in domains:
227 if not domain in hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES):
228 _LOGGER.error(f"Cannot setup group for domain {domain}, no entities found")
229 continue
230
231 domain_entities = hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES)[domain]
232 sensor_config[CONF_UNIQUE_ID] = f"powercalc_domaingroup_{domain}"
233 group_name = f"All {domain}"
234
235 entities = await create_group_sensors(
236 group_name, sensor_config, domain_entities, hass
237 )
238 await component.async_add_entities(entities)
239 return []
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/powercalc/__init__.py b/custom_components/powercalc/__init__.py
--- a/custom_components/powercalc/__init__.py
+++ b/custom_components/powercalc/__init__.py
@@ -220,7 +220,7 @@
hass: HomeAssistantType, global_config: dict, domains: list[str]
):
"""Create group sensors aggregating all power sensors from given domains"""
- component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)
+ sensor_component = hass.data[SENSOR_DOMAIN]
sensor_config = global_config.copy()
_LOGGER.debug(f"Setting up domain based group sensors..")
for domain in domains:
@@ -235,5 +235,5 @@
entities = await create_group_sensors(
group_name, sensor_config, domain_entities, hass
)
- await component.async_add_entities(entities)
+ await sensor_component.async_add_entities(entities)
return []
| {"golden_diff": "diff --git a/custom_components/powercalc/__init__.py b/custom_components/powercalc/__init__.py\n--- a/custom_components/powercalc/__init__.py\n+++ b/custom_components/powercalc/__init__.py\n@@ -220,7 +220,7 @@\n hass: HomeAssistantType, global_config: dict, domains: list[str]\n ):\n \"\"\"Create group sensors aggregating all power sensors from given domains\"\"\"\n- component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)\n+ sensor_component = hass.data[SENSOR_DOMAIN]\n sensor_config = global_config.copy()\n _LOGGER.debug(f\"Setting up domain based group sensors..\")\n for domain in domains:\n@@ -235,5 +235,5 @@\n entities = await create_group_sensors(\n group_name, sensor_config, domain_entities, hass\n )\n- await component.async_add_entities(entities)\n+ await sensor_component.async_add_entities(entities)\n return []\n", "issue": "dont create an EntityComponent for sensor\nHi Bram,\r\n\r\nIve been debuggin some difficult to find issues in Core currently, (more specific on the homeassistant.update_entity service, which errors out) and as it turns out it is caused by Powercalc, more specifically:\r\n\r\n```\r\nFile \"/config/custom_components/powercalc/__init__.py\", line 223, in create_domain_groups\r\n component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)\r\n```\r\n\r\nas Erik Montnemery put it: This is what powercalc does, and it's 100% wrong. And\r\n\r\n> he can't create an EntityComponent for sensor\r\n> that's done automatically by core for any integration which has sensor platforms already\r\n\r\n\r\nIve just been able to confirm the proper behavior of the homeassistant.update_entity service without Powercalc installed.\r\nThis is a very serious flaw in the system, interfering with core performance. Ive been forced to take out Powercalc now, to restore core functionality\r\n\r\nplease check that ?\r\n\r\nfull section in the homeassitant log:\r\n```\r\n2022-04-29 14:40:08 ERROR (MainThread) [custom_components.sensor] EntityComponent() called for sensor domain from\r\n File \"/usr/local/lib/python3.9/runpy.py\", line 197, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/local/lib/python3.9/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/src/homeassistant/homeassistant/__main__.py\", line 203, in <module>\r\n sys.exit(main())\r\n File \"/usr/src/homeassistant/homeassistant/__main__.py\", line 191, in main\r\n exit_code = runner.run(runtime_conf)\r\n File \"/usr/src/homeassistant/homeassistant/runner.py\", line 119, in run\r\n return loop.run_until_complete(setup_and_run_hass(runtime_config))\r\n File \"/usr/local/lib/python3.9/asyncio/base_events.py\", line 629, in run_until_complete\r\n self.run_forever()\r\n File \"/usr/local/lib/python3.9/asyncio/base_events.py\", line 596, in run_forever\r\n self._run_once()\r\n File \"/usr/local/lib/python3.9/asyncio/base_events.py\", line 1890, in _run_once\r\n handle._run()\r\n File \"/usr/local/lib/python3.9/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/config/custom_components/powercalc/__init__.py\", line 153, in _create_domain_groups\r\n await create_domain_groups(\r\n File \"/config/custom_components/powercalc/__init__.py\", line 223, in create_domain_groups\r\n component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)\r\n```\r\nthanks!\n", "before_files": [{"content": "\"\"\"The PowerCalc integration.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nimport 
homeassistant.helpers.config_validation as cv\nimport homeassistant.helpers.entity_registry as er\nimport voluptuous as vol\nfrom homeassistant.components.light import DOMAIN as LIGHT_DOMAIN\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.utility_meter import DEFAULT_OFFSET, max_28_days\nfrom homeassistant.components.utility_meter.const import METER_TYPES\nfrom homeassistant.const import (\n CONF_ENTITY_ID,\n CONF_SCAN_INTERVAL,\n CONF_UNIQUE_ID,\n EVENT_HOMEASSISTANT_STARTED,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers import discovery\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.helpers.typing import HomeAssistantType\n\nfrom .common import create_source_entity, validate_name_pattern\nfrom .const import (\n CONF_CREATE_DOMAIN_GROUPS,\n CONF_CREATE_ENERGY_SENSORS,\n CONF_CREATE_UTILITY_METERS,\n CONF_ENABLE_AUTODISCOVERY,\n CONF_ENERGY_INTEGRATION_METHOD,\n CONF_ENERGY_SENSOR_CATEGORY,\n CONF_ENERGY_SENSOR_NAMING,\n CONF_ENERGY_SENSOR_PRECISION,\n CONF_POWER_SENSOR_CATEGORY,\n CONF_POWER_SENSOR_NAMING,\n CONF_POWER_SENSOR_PRECISION,\n CONF_UTILITY_METER_OFFSET,\n CONF_UTILITY_METER_TARIFFS,\n CONF_UTILITY_METER_TYPES,\n DATA_CALCULATOR_FACTORY,\n DATA_CONFIGURED_ENTITIES,\n DATA_DISCOVERED_ENTITIES,\n DATA_DOMAIN_ENTITIES,\n DEFAULT_ENERGY_INTEGRATION_METHOD,\n DEFAULT_ENERGY_NAME_PATTERN,\n DEFAULT_ENERGY_SENSOR_PRECISION,\n DEFAULT_ENTITY_CATEGORY,\n DEFAULT_POWER_NAME_PATTERN,\n DEFAULT_POWER_SENSOR_PRECISION,\n DEFAULT_SCAN_INTERVAL,\n DEFAULT_UTILITY_METER_TYPES,\n DISCOVERY_LIGHT_MODEL,\n DISCOVERY_SOURCE_ENTITY,\n DOMAIN,\n DOMAIN_CONFIG,\n ENERGY_INTEGRATION_METHODS,\n ENTITY_CATEGORIES,\n)\nfrom .errors import ModelNotSupported\nfrom .model_discovery import get_light_model, is_supported_for_autodiscovery\nfrom .sensors.group import create_group_sensors\nfrom .strategy.factory import PowerCalculatorStrategyFactory\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.All(\n vol.Schema(\n {\n vol.Optional(\n CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL\n ): cv.time_period,\n vol.Optional(\n CONF_POWER_SENSOR_NAMING, default=DEFAULT_POWER_NAME_PATTERN\n ): validate_name_pattern,\n vol.Optional(\n CONF_POWER_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY\n ): vol.In(ENTITY_CATEGORIES),\n vol.Optional(\n CONF_ENERGY_SENSOR_NAMING, default=DEFAULT_ENERGY_NAME_PATTERN\n ): validate_name_pattern,\n vol.Optional(\n CONF_ENERGY_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY\n ): vol.In(ENTITY_CATEGORIES),\n vol.Optional(CONF_ENABLE_AUTODISCOVERY, default=True): cv.boolean,\n vol.Optional(CONF_CREATE_ENERGY_SENSORS, default=True): cv.boolean,\n vol.Optional(CONF_CREATE_UTILITY_METERS, default=False): cv.boolean,\n vol.Optional(CONF_UTILITY_METER_TARIFFS, default=[]): vol.All(\n cv.ensure_list, [cv.string]\n ),\n vol.Optional(\n CONF_UTILITY_METER_TYPES, default=DEFAULT_UTILITY_METER_TYPES\n ): vol.All(cv.ensure_list, [vol.In(METER_TYPES)]),\n vol.Optional(\n CONF_UTILITY_METER_OFFSET, default=DEFAULT_OFFSET\n ): vol.All(cv.time_period, cv.positive_timedelta, max_28_days),\n vol.Optional(\n CONF_ENERGY_INTEGRATION_METHOD,\n default=DEFAULT_ENERGY_INTEGRATION_METHOD,\n ): vol.In(ENERGY_INTEGRATION_METHODS),\n vol.Optional(\n CONF_ENERGY_SENSOR_PRECISION,\n default=DEFAULT_ENERGY_SENSOR_PRECISION,\n ): cv.positive_int,\n vol.Optional(\n CONF_POWER_SENSOR_PRECISION,\n default=DEFAULT_POWER_SENSOR_PRECISION,\n ): cv.positive_int,\n vol.Optional(CONF_CREATE_DOMAIN_GROUPS, 
default=[]): vol.All(\n cv.ensure_list, [cv.string]\n ),\n }\n ),\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(hass: HomeAssistantType, config: dict) -> bool:\n domain_config = config.get(DOMAIN) or {\n CONF_POWER_SENSOR_NAMING: DEFAULT_POWER_NAME_PATTERN,\n CONF_POWER_SENSOR_PRECISION: DEFAULT_POWER_SENSOR_PRECISION,\n CONF_POWER_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,\n CONF_ENERGY_INTEGRATION_METHOD: DEFAULT_ENERGY_INTEGRATION_METHOD,\n CONF_ENERGY_SENSOR_NAMING: DEFAULT_ENERGY_NAME_PATTERN,\n CONF_ENERGY_SENSOR_PRECISION: DEFAULT_ENERGY_SENSOR_PRECISION,\n CONF_ENERGY_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,\n CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,\n CONF_CREATE_DOMAIN_GROUPS: [],\n CONF_CREATE_ENERGY_SENSORS: True,\n CONF_CREATE_UTILITY_METERS: False,\n CONF_ENABLE_AUTODISCOVERY: True,\n CONF_UTILITY_METER_OFFSET: DEFAULT_OFFSET,\n CONF_UTILITY_METER_TYPES: DEFAULT_UTILITY_METER_TYPES,\n }\n\n hass.data[DOMAIN] = {\n DATA_CALCULATOR_FACTORY: PowerCalculatorStrategyFactory(hass),\n DOMAIN_CONFIG: domain_config,\n DATA_CONFIGURED_ENTITIES: {},\n DATA_DOMAIN_ENTITIES: {},\n DATA_DISCOVERED_ENTITIES: [],\n }\n\n await autodiscover_entities(config, domain_config, hass)\n\n if domain_config.get(CONF_CREATE_DOMAIN_GROUPS):\n\n async def _create_domain_groups(event: None):\n await create_domain_groups(\n hass,\n domain_config,\n domain_config.get(CONF_CREATE_DOMAIN_GROUPS),\n )\n\n hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STARTED,\n _create_domain_groups,\n )\n\n return True\n\n\nasync def autodiscover_entities(\n config: dict, domain_config: dict, hass: HomeAssistantType\n):\n \"\"\"Discover entities supported for powercalc autoconfiguration in HA instance\"\"\"\n\n if not domain_config.get(CONF_ENABLE_AUTODISCOVERY):\n return\n\n _LOGGER.debug(\"Start auto discovering entities\")\n entity_registry = await er.async_get_registry(hass)\n for entity_entry in list(entity_registry.entities.values()):\n if entity_entry.disabled:\n continue\n\n if entity_entry.domain != LIGHT_DOMAIN:\n continue\n\n if not await is_supported_for_autodiscovery(hass, entity_entry):\n continue\n\n source_entity = await create_source_entity(entity_entry.entity_id, hass)\n try:\n light_model = await get_light_model(hass, {}, source_entity.entity_entry)\n if not light_model.is_autodiscovery_allowed:\n _LOGGER.debug(\n f\"{entity_entry.entity_id}: Model found in database, but needs manual configuration\"\n )\n continue\n except ModelNotSupported:\n _LOGGER.debug(\n \"%s: Model not found in library, skipping auto configuration\",\n entity_entry.entity_id,\n )\n continue\n\n if not light_model:\n continue\n\n discovery_info = {\n CONF_ENTITY_ID: entity_entry.entity_id,\n DISCOVERY_SOURCE_ENTITY: source_entity,\n DISCOVERY_LIGHT_MODEL: light_model,\n }\n hass.async_create_task(\n discovery.async_load_platform(\n hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config\n )\n )\n\n _LOGGER.debug(\"Done auto discovering entities\")\n\n\nasync def create_domain_groups(\n hass: HomeAssistantType, global_config: dict, domains: list[str]\n):\n \"\"\"Create group sensors aggregating all power sensors from given domains\"\"\"\n component = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass)\n sensor_config = global_config.copy()\n _LOGGER.debug(f\"Setting up domain based group sensors..\")\n for domain in domains:\n if not domain in hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES):\n _LOGGER.error(f\"Cannot setup group for domain {domain}, no entities found\")\n continue\n\n 
domain_entities = hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES)[domain]\n sensor_config[CONF_UNIQUE_ID] = f\"powercalc_domaingroup_{domain}\"\n group_name = f\"All {domain}\"\n\n entities = await create_group_sensors(\n group_name, sensor_config, domain_entities, hass\n )\n await component.async_add_entities(entities)\n return []\n", "path": "custom_components/powercalc/__init__.py"}], "after_files": [{"content": "\"\"\"The PowerCalc integration.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\n\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.helpers.entity_registry as er\nimport voluptuous as vol\nfrom homeassistant.components.light import DOMAIN as LIGHT_DOMAIN\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.utility_meter import DEFAULT_OFFSET, max_28_days\nfrom homeassistant.components.utility_meter.const import METER_TYPES\nfrom homeassistant.const import (\n CONF_ENTITY_ID,\n CONF_SCAN_INTERVAL,\n CONF_UNIQUE_ID,\n EVENT_HOMEASSISTANT_STARTED,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers import discovery\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.helpers.typing import HomeAssistantType\n\nfrom .common import create_source_entity, validate_name_pattern\nfrom .const import (\n CONF_CREATE_DOMAIN_GROUPS,\n CONF_CREATE_ENERGY_SENSORS,\n CONF_CREATE_UTILITY_METERS,\n CONF_ENABLE_AUTODISCOVERY,\n CONF_ENERGY_INTEGRATION_METHOD,\n CONF_ENERGY_SENSOR_CATEGORY,\n CONF_ENERGY_SENSOR_NAMING,\n CONF_ENERGY_SENSOR_PRECISION,\n CONF_POWER_SENSOR_CATEGORY,\n CONF_POWER_SENSOR_NAMING,\n CONF_POWER_SENSOR_PRECISION,\n CONF_UTILITY_METER_OFFSET,\n CONF_UTILITY_METER_TARIFFS,\n CONF_UTILITY_METER_TYPES,\n DATA_CALCULATOR_FACTORY,\n DATA_CONFIGURED_ENTITIES,\n DATA_DISCOVERED_ENTITIES,\n DATA_DOMAIN_ENTITIES,\n DEFAULT_ENERGY_INTEGRATION_METHOD,\n DEFAULT_ENERGY_NAME_PATTERN,\n DEFAULT_ENERGY_SENSOR_PRECISION,\n DEFAULT_ENTITY_CATEGORY,\n DEFAULT_POWER_NAME_PATTERN,\n DEFAULT_POWER_SENSOR_PRECISION,\n DEFAULT_SCAN_INTERVAL,\n DEFAULT_UTILITY_METER_TYPES,\n DISCOVERY_LIGHT_MODEL,\n DISCOVERY_SOURCE_ENTITY,\n DOMAIN,\n DOMAIN_CONFIG,\n ENERGY_INTEGRATION_METHODS,\n ENTITY_CATEGORIES,\n)\nfrom .errors import ModelNotSupported\nfrom .model_discovery import get_light_model, is_supported_for_autodiscovery\nfrom .sensors.group import create_group_sensors\nfrom .strategy.factory import PowerCalculatorStrategyFactory\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.All(\n vol.Schema(\n {\n vol.Optional(\n CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL\n ): cv.time_period,\n vol.Optional(\n CONF_POWER_SENSOR_NAMING, default=DEFAULT_POWER_NAME_PATTERN\n ): validate_name_pattern,\n vol.Optional(\n CONF_POWER_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY\n ): vol.In(ENTITY_CATEGORIES),\n vol.Optional(\n CONF_ENERGY_SENSOR_NAMING, default=DEFAULT_ENERGY_NAME_PATTERN\n ): validate_name_pattern,\n vol.Optional(\n CONF_ENERGY_SENSOR_CATEGORY, default=DEFAULT_ENTITY_CATEGORY\n ): vol.In(ENTITY_CATEGORIES),\n vol.Optional(CONF_ENABLE_AUTODISCOVERY, default=True): cv.boolean,\n vol.Optional(CONF_CREATE_ENERGY_SENSORS, default=True): cv.boolean,\n vol.Optional(CONF_CREATE_UTILITY_METERS, default=False): cv.boolean,\n vol.Optional(CONF_UTILITY_METER_TARIFFS, default=[]): vol.All(\n cv.ensure_list, [cv.string]\n ),\n vol.Optional(\n CONF_UTILITY_METER_TYPES, default=DEFAULT_UTILITY_METER_TYPES\n ): vol.All(cv.ensure_list, [vol.In(METER_TYPES)]),\n 
vol.Optional(\n CONF_UTILITY_METER_OFFSET, default=DEFAULT_OFFSET\n ): vol.All(cv.time_period, cv.positive_timedelta, max_28_days),\n vol.Optional(\n CONF_ENERGY_INTEGRATION_METHOD,\n default=DEFAULT_ENERGY_INTEGRATION_METHOD,\n ): vol.In(ENERGY_INTEGRATION_METHODS),\n vol.Optional(\n CONF_ENERGY_SENSOR_PRECISION,\n default=DEFAULT_ENERGY_SENSOR_PRECISION,\n ): cv.positive_int,\n vol.Optional(\n CONF_POWER_SENSOR_PRECISION,\n default=DEFAULT_POWER_SENSOR_PRECISION,\n ): cv.positive_int,\n vol.Optional(CONF_CREATE_DOMAIN_GROUPS, default=[]): vol.All(\n cv.ensure_list, [cv.string]\n ),\n }\n ),\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(hass: HomeAssistantType, config: dict) -> bool:\n domain_config = config.get(DOMAIN) or {\n CONF_POWER_SENSOR_NAMING: DEFAULT_POWER_NAME_PATTERN,\n CONF_POWER_SENSOR_PRECISION: DEFAULT_POWER_SENSOR_PRECISION,\n CONF_POWER_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,\n CONF_ENERGY_INTEGRATION_METHOD: DEFAULT_ENERGY_INTEGRATION_METHOD,\n CONF_ENERGY_SENSOR_NAMING: DEFAULT_ENERGY_NAME_PATTERN,\n CONF_ENERGY_SENSOR_PRECISION: DEFAULT_ENERGY_SENSOR_PRECISION,\n CONF_ENERGY_SENSOR_CATEGORY: DEFAULT_ENTITY_CATEGORY,\n CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,\n CONF_CREATE_DOMAIN_GROUPS: [],\n CONF_CREATE_ENERGY_SENSORS: True,\n CONF_CREATE_UTILITY_METERS: False,\n CONF_ENABLE_AUTODISCOVERY: True,\n CONF_UTILITY_METER_OFFSET: DEFAULT_OFFSET,\n CONF_UTILITY_METER_TYPES: DEFAULT_UTILITY_METER_TYPES,\n }\n\n hass.data[DOMAIN] = {\n DATA_CALCULATOR_FACTORY: PowerCalculatorStrategyFactory(hass),\n DOMAIN_CONFIG: domain_config,\n DATA_CONFIGURED_ENTITIES: {},\n DATA_DOMAIN_ENTITIES: {},\n DATA_DISCOVERED_ENTITIES: [],\n }\n\n await autodiscover_entities(config, domain_config, hass)\n\n if domain_config.get(CONF_CREATE_DOMAIN_GROUPS):\n\n async def _create_domain_groups(event: None):\n await create_domain_groups(\n hass,\n domain_config,\n domain_config.get(CONF_CREATE_DOMAIN_GROUPS),\n )\n\n hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STARTED,\n _create_domain_groups,\n )\n\n return True\n\n\nasync def autodiscover_entities(\n config: dict, domain_config: dict, hass: HomeAssistantType\n):\n \"\"\"Discover entities supported for powercalc autoconfiguration in HA instance\"\"\"\n\n if not domain_config.get(CONF_ENABLE_AUTODISCOVERY):\n return\n\n _LOGGER.debug(\"Start auto discovering entities\")\n entity_registry = await er.async_get_registry(hass)\n for entity_entry in list(entity_registry.entities.values()):\n if entity_entry.disabled:\n continue\n\n if entity_entry.domain != LIGHT_DOMAIN:\n continue\n\n if not await is_supported_for_autodiscovery(hass, entity_entry):\n continue\n\n source_entity = await create_source_entity(entity_entry.entity_id, hass)\n try:\n light_model = await get_light_model(hass, {}, source_entity.entity_entry)\n if not light_model.is_autodiscovery_allowed:\n _LOGGER.debug(\n f\"{entity_entry.entity_id}: Model found in database, but needs manual configuration\"\n )\n continue\n except ModelNotSupported:\n _LOGGER.debug(\n \"%s: Model not found in library, skipping auto configuration\",\n entity_entry.entity_id,\n )\n continue\n\n if not light_model:\n continue\n\n discovery_info = {\n CONF_ENTITY_ID: entity_entry.entity_id,\n DISCOVERY_SOURCE_ENTITY: source_entity,\n DISCOVERY_LIGHT_MODEL: light_model,\n }\n hass.async_create_task(\n discovery.async_load_platform(\n hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config\n )\n )\n\n _LOGGER.debug(\"Done auto discovering 
entities\")\n\n\nasync def create_domain_groups(\n hass: HomeAssistantType, global_config: dict, domains: list[str]\n):\n \"\"\"Create group sensors aggregating all power sensors from given domains\"\"\"\n sensor_component = hass.data[SENSOR_DOMAIN]\n sensor_config = global_config.copy()\n _LOGGER.debug(f\"Setting up domain based group sensors..\")\n for domain in domains:\n if not domain in hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES):\n _LOGGER.error(f\"Cannot setup group for domain {domain}, no entities found\")\n continue\n\n domain_entities = hass.data[DOMAIN].get(DATA_DOMAIN_ENTITIES)[domain]\n sensor_config[CONF_UNIQUE_ID] = f\"powercalc_domaingroup_{domain}\"\n group_name = f\"All {domain}\"\n\n entities = await create_group_sensors(\n group_name, sensor_config, domain_entities, hass\n )\n await sensor_component.async_add_entities(entities)\n return []\n", "path": "custom_components/powercalc/__init__.py"}]} | 3,286 | 202 |
gh_patches_debug_11872 | rasdani/github-patches | git_diff | googleapis__python-bigquery-465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bound the maximum supported Python version
Related: #462.
The BigQuery library does not support Python 3.9 yet, as the `bqstorage` and `pandas` extras are blocked by the `pyarrow` dependency. Pyarrow [added](https://issues.apache.org/jira/browse/ARROW-10224) Python 3.9 support, but the change has not been released yet (awaiting `pyarrow==3.0.0`).
We need to reflect this in `setup.py`'s `python_requires`.
--- END ISSUE ---
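The change the issue asks for is an upper bound on the supported interpreter versions in `setup.py`. Below is a hedged sketch of what such a bound can look like (as it would appear inside a setup.py); the `<3.9` bound follows the issue text, and the package name and version are placeholders, so the released constraint may differ.

```python
# Sketch: cap supported interpreters in setup.py until the blocking
# dependency (pyarrow) publishes wheels for newer Python versions.
import setuptools

setuptools.setup(
    name="example-package",         # placeholder, not the real package name
    version="0.0.0",
    python_requires=">=3.6, <3.9",  # upper bound per the issue text
)
```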
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "six >=1.13.0,< 2.0.0dev",
37 "protobuf >= 3.12.0",
38 ]
39 extras = {
40 "bqstorage": [
41 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
42 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
43 # installed, even though `google-cloud-bigquery-storage` specifies it
44 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
45 # See: https://github.com/googleapis/python-bigquery/issues/83 The
46 # grpc.Channel.close() method isn't added until 1.32.0.
47 # https://github.com/grpc/grpc/pull/15254
48 "grpcio >= 1.32.0, < 2.0dev",
49 "pyarrow >= 1.0.0, < 3.0dev",
50 ],
51 "pandas": [
52 "pandas>=0.23.0",
53 # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.
54 "pyarrow >= 1.0.0, < 3.0dev",
55 ],
56 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
57 "opentelemetry": [
58 "opentelemetry-api==0.11b0",
59 "opentelemetry-sdk==0.11b0",
60 "opentelemetry-instrumentation==0.11b0",
61 ],
62 }
63
64 all_extras = []
65
66 for extra in extras:
67 all_extras.extend(extras[extra])
68
69 extras["all"] = all_extras
70
71 # Setup boilerplate below this line.
72
73 package_root = os.path.abspath(os.path.dirname(__file__))
74
75 readme_filename = os.path.join(package_root, "README.rst")
76 with io.open(readme_filename, encoding="utf-8") as readme_file:
77 readme = readme_file.read()
78
79 version = {}
80 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
81 exec(fp.read(), version)
82 version = version["__version__"]
83
84 # Only include packages under the 'google' namespace. Do not include tests,
85 # benchmarks, etc.
86 packages = [
87 package
88 for package in setuptools.PEP420PackageFinder.find()
89 if package.startswith("google")
90 ]
91
92 # Determine which namespaces are needed.
93 namespaces = ["google"]
94 if "google.cloud" in packages:
95 namespaces.append("google.cloud")
96
97
98 setuptools.setup(
99 name=name,
100 version=version,
101 description=description,
102 long_description=readme,
103 author="Google LLC",
104 author_email="[email protected]",
105 license="Apache 2.0",
106 url="https://github.com/googleapis/python-bigquery",
107 classifiers=[
108 release_status,
109 "Intended Audience :: Developers",
110 "License :: OSI Approved :: Apache Software License",
111 "Programming Language :: Python",
112 "Programming Language :: Python :: 3",
113 "Programming Language :: Python :: 3.6",
114 "Programming Language :: Python :: 3.7",
115 "Programming Language :: Python :: 3.8",
116 "Operating System :: OS Independent",
117 "Topic :: Internet",
118 ],
119 platforms="Posix; MacOS X; Windows",
120 packages=packages,
121 namespace_packages=namespaces,
122 install_requires=dependencies,
123 extras_require=extras,
124 python_requires=">=3.6",
125 include_package_data=True,
126 zip_safe=False,
127 )
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,6 @@
"proto-plus >= 1.10.0",
"google-cloud-core >= 1.4.1, < 2.0dev",
"google-resumable-media >= 0.6.0, < 2.0dev",
- "six >=1.13.0,< 2.0.0dev",
"protobuf >= 3.12.0",
]
extras = {
@@ -121,7 +120,7 @@
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
- python_requires=">=3.6",
+ python_requires=">=3.6, <3.9",
include_package_data=True,
zip_safe=False,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,6 @@\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n- \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n ]\n extras = {\n@@ -121,7 +120,7 @@\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n- python_requires=\">=3.6\",\n+ python_requires=\">=3.6, <3.9\",\n include_package_data=True,\n zip_safe=False,\n )\n", "issue": "Bound the maximum supported Python version\nRelated: #462.\r\n\r\nThe BigQuery library does not support Python 3.9 yet, as the `bqstorage` and `pandas` extras are blocked by the `pyarrow` dependency. Pyarrow [added](https://issues.apache.org/jira/browse/ARROW-10224) the Python 3.9 support, but the change has not been released yet (awaiting `pyarrow==3.0.0`).\r\n\r\nWe need to reflect this in `setup.py`'s ``python_requires`.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"six >=1.13.0,< 2.0.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n 
\"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"pandas\": [\n \"pandas>=0.23.0\",\n # pyarrow 1.0.0 is required for the use of timestamp_as_object keyword.\n \"pyarrow >= 1.0.0, < 3.0dev\",\n ],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.9\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,736 | 193 |
gh_patches_debug_33972 | rasdani/github-patches | git_diff | PrefectHQ__prefect-1972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Flow and Task identifying information to LogRecords
## Current behavior
- All log entries have the default [LogRecord attributes](https://docs.python.org/3/library/logging.html#logrecord-objects)...
- There is no universally identifiable flow slug
## Proposed behavior
...It would be useful to attach Core-specific information to log entries:
- `task_name` & `task_slug`: available only on entries emitted from Tasks and TaskRunners
- `flow_name` & `flow_slug`: available only on entries emitted from Flows and FlowRunners
The addition of the `flow_slug` makes it possible to uniquely identify flows other than by name.
## Example
One can now take a single stream of log entries during a local flow run and separate it by entity without parsing the `LogRecord.msg` field (which is unreliable). This makes it possible to build richer feedback interfaces for filtering, organizing, and otherwise selectively manipulating log entries.
Here is an example CLI interface that would be made possible by these features:

(Given the following flow):
```python
from utils import seed, random_data
import prefect
@prefect.task
def extract():
return seed.get()
@prefect.task
def transform(seed):
logger = prefect.context.get("logger")
data = []
for x in range(10):
c_data = random_data(seed)
logger.info("debug info: {}".format(c_data))
data.append(c_data)
return data
@prefect.task
def load(data):
# do something with the data....
pass
with prefect.Flow("ETL") as flow:
e = extract()
t = transform(e)
load(t)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/utilities/logging.py`
Content:
```
1 """
2 Utility functions for interacting with and configuring logging. The main entrypoint for retrieving loggers for
3 customization is the `get_logger` utility.
4
5 Note that Prefect Tasks come equipped with their own loggers. These can be accessed via:
6 - `self.logger` if implementing a Task class
7 - `prefect.context.get("logger")` if using the `task` decorator
8
9 When running locally, log levels and message formatting are set via your Prefect configuration file.
10 """
11 import atexit
12 import json
13 import logging
14 import sys
15 import threading
16 import time
17 from queue import Queue, Empty
18 from typing import Any
19
20 import pendulum
21
22 import prefect
23 from prefect.utilities.context import context
24
25
26 class CloudHandler(logging.StreamHandler):
27 def __init__(self) -> None:
28 super().__init__(sys.stdout)
29 self.client = None
30 self.logger = logging.getLogger("CloudHandler")
31 handler = logging.StreamHandler(sys.stdout)
32 formatter = logging.Formatter(context.config.logging.format)
33 formatter.converter = time.gmtime # type: ignore
34 handler.setFormatter(formatter)
35 self.logger.addHandler(handler)
36 self.logger.setLevel(context.config.logging.level)
37
38 @property
39 def queue(self) -> Queue:
40 if not hasattr(self, "_queue"):
41 self._queue = Queue() # type: Queue
42 self._flush = False
43 self.start()
44 return self._queue
45
46 def flush(self) -> None:
47 self._flush = True
48 if self.client is not None:
49 self.batch_upload()
50 self._thread.join()
51
52 def batch_upload(self) -> None:
53 logs = []
54 try:
55 while True:
56 log = self.queue.get(False)
57 logs.append(log)
58 except Empty:
59 pass
60
61 if logs:
62 try:
63 assert self.client is not None
64 self.client.write_run_logs(logs)
65 except Exception as exc:
66 message = "Failed to write log with error: {}".format(str(exc))
67 self.logger.critical(message)
68
69 # Attempt to write batch error log otherwise log invalid cloud communication
70 try:
71 assert self.client is not None
72 self.client.write_run_logs([self._make_error_log(message)])
73 except Exception as exc:
74 self.logger.critical("Unable to write logs to Prefect Cloud")
75
76 def _monitor(self) -> None:
77 while not self._flush:
78 self.batch_upload()
79 time.sleep(self.heartbeat)
80
81 def __del__(self) -> None:
82 if hasattr(self, "_thread"):
83 self.flush()
84 atexit.unregister(self.flush)
85
86 def start(self) -> None:
87 if not hasattr(self, "_thread"):
88 self.heartbeat = context.config.cloud.logging_heartbeat
89 self._thread = t = threading.Thread(
90 target=self._monitor, name="PrefectCloudLoggingThread"
91 )
92 t.daemon = True
93 t.start()
94 atexit.register(self.flush)
95
96 def put(self, log: dict) -> None:
97 try:
98 json.dumps(log) # make sure the payload is serializable
99 self.queue.put(log)
100 except TypeError as exc:
101 message = "Failed to write log with error: {}".format(str(exc))
102 self.logger.critical(message)
103
104 self.queue.put(self._make_error_log(message))
105
106 def emit(self, record) -> None: # type: ignore
107 # if we shouldn't log to cloud, don't emit
108 if not prefect.context.config.logging.log_to_cloud:
109 return
110
111 try:
112 from prefect.client import Client
113
114 if self.client is None:
115 self.client = Client() # type: ignore
116
117 assert isinstance(self.client, Client) # mypy assert
118
119 record_dict = record.__dict__.copy()
120 log = dict()
121 log["flowRunId"] = prefect.context.get("flow_run_id", None)
122 log["taskRunId"] = prefect.context.get("task_run_id", None)
123 log["timestamp"] = pendulum.from_timestamp(
124 record_dict.pop("created", time.time())
125 ).isoformat()
126 log["name"] = record_dict.pop("name", None)
127 log["message"] = record_dict.pop("message", None)
128 log["level"] = record_dict.pop("levelname", None)
129
130 if record_dict.get("exc_text") is not None:
131 log["message"] += "\n" + record_dict.pop("exc_text", "")
132 record_dict.pop("exc_info", None)
133
134 log["info"] = record_dict
135 self.put(log)
136 except Exception as exc:
137 message = "Failed to write log with error: {}".format(str(exc))
138 self.logger.critical(message)
139
140 self.put(self._make_error_log(message))
141
142 def _make_error_log(self, message: str) -> dict:
143 log = dict()
144 log["flowRunId"] = prefect.context.get("flow_run_id", None)
145 log["timestamp"] = pendulum.from_timestamp(time.time()).isoformat()
146 log["name"] = self.logger.name
147 log["message"] = message
148 log["level"] = "CRITICAL"
149 log["info"] = {}
150
151 return log
152
153
154 def configure_logging(testing: bool = False) -> logging.Logger:
155 """
156 Creates a "prefect" root logger with a `StreamHandler` that has level and formatting
157 set from `prefect.config`.
158
159 Args:
160 - testing (bool, optional): a boolean specifying whether this configuration
161 is for testing purposes only; this helps us isolate any global state during testing
162 by configuring a "prefect-test-logger" instead of the standard "prefect" logger
163
164 Returns:
165 - logging.Logger: a configured logging object
166 """
167 name = "prefect-test-logger" if testing else "prefect"
168 logger = logging.getLogger(name)
169 handler = logging.StreamHandler(sys.stdout)
170 formatter = logging.Formatter(context.config.logging.format)
171 formatter.converter = time.gmtime # type: ignore
172 handler.setFormatter(formatter)
173 logger.addHandler(handler)
174 logger.setLevel(context.config.logging.level)
175
176 cloud_handler = CloudHandler()
177 cloud_handler.setLevel("DEBUG")
178 logger.addHandler(cloud_handler)
179 return logger
180
181
182 prefect_logger = configure_logging()
183
184
185 def get_logger(name: str = None) -> logging.Logger:
186 """
187 Returns a "prefect" logger.
188
189 Args:
190 - name (str): if `None`, the root Prefect logger is returned. If provided, a child
191 logger of the name `"prefect.{name}"` is returned. The child logger inherits
192 the root logger's settings.
193
194 Returns:
195 - logging.Logger: a configured logging object with the appropriate name
196 """
197 if name is None:
198 return prefect_logger
199 else:
200 return prefect_logger.getChild(name)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/utilities/logging.py b/src/prefect/utilities/logging.py
--- a/src/prefect/utilities/logging.py
+++ b/src/prefect/utilities/logging.py
@@ -23,6 +23,17 @@
from prefect.utilities.context import context
+_original_log_record_factory = logging.getLogRecordFactory()
+
+PREFECT_LOG_RECORD_ATTRIBUTES = (
+ "flow_name",
+ "flow_run_id",
+ "task_name",
+ "task_slug",
+ "task_run_id",
+)
+
+
class CloudHandler(logging.StreamHandler):
def __init__(self) -> None:
super().__init__(sys.stdout)
@@ -151,6 +162,27 @@
return log
+def _log_record_context_injector(*args: Any, **kwargs: Any) -> logging.LogRecord:
+ """
+ A custom logger LogRecord Factory that injects selected context parameters into newly created logs.
+
+ Args:
+ - *args: arguments to pass to the original LogRecord Factory
+ - **kwargs: keyword arguments to pass to the original LogRecord Factory
+
+ Returns:
+ - logging.LogRecord: the newly created LogRecord
+ """
+ record = _original_log_record_factory(*args, **kwargs)
+
+ for attr in PREFECT_LOG_RECORD_ATTRIBUTES:
+ value = prefect.context.get(attr, None)
+ if value:
+ setattr(record, attr, value)
+
+ return record
+
+
def configure_logging(testing: bool = False) -> logging.Logger:
"""
Creates a "prefect" root logger with a `StreamHandler` that has level and formatting
@@ -164,6 +196,8 @@
Returns:
- logging.Logger: a configured logging object
"""
+ logging.setLogRecordFactory(_log_record_context_injector)
+
name = "prefect-test-logger" if testing else "prefect"
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
@@ -194,6 +228,7 @@
Returns:
- logging.Logger: a configured logging object with the appropriate name
"""
+
if name is None:
return prefect_logger
else:
| {"golden_diff": "diff --git a/src/prefect/utilities/logging.py b/src/prefect/utilities/logging.py\n--- a/src/prefect/utilities/logging.py\n+++ b/src/prefect/utilities/logging.py\n@@ -23,6 +23,17 @@\n from prefect.utilities.context import context\n \n \n+_original_log_record_factory = logging.getLogRecordFactory()\n+\n+PREFECT_LOG_RECORD_ATTRIBUTES = (\n+ \"flow_name\",\n+ \"flow_run_id\",\n+ \"task_name\",\n+ \"task_slug\",\n+ \"task_run_id\",\n+)\n+\n+\n class CloudHandler(logging.StreamHandler):\n def __init__(self) -> None:\n super().__init__(sys.stdout)\n@@ -151,6 +162,27 @@\n return log\n \n \n+def _log_record_context_injector(*args: Any, **kwargs: Any) -> logging.LogRecord:\n+ \"\"\"\n+ A custom logger LogRecord Factory that injects selected context parameters into newly created logs.\n+\n+ Args:\n+ - *args: arguments to pass to the original LogRecord Factory\n+ - **kwargs: keyword arguments to pass to the original LogRecord Factory\n+\n+ Returns:\n+ - logging.LogRecord: the newly created LogRecord\n+ \"\"\"\n+ record = _original_log_record_factory(*args, **kwargs)\n+\n+ for attr in PREFECT_LOG_RECORD_ATTRIBUTES:\n+ value = prefect.context.get(attr, None)\n+ if value:\n+ setattr(record, attr, value)\n+\n+ return record\n+\n+\n def configure_logging(testing: bool = False) -> logging.Logger:\n \"\"\"\n Creates a \"prefect\" root logger with a `StreamHandler` that has level and formatting\n@@ -164,6 +196,8 @@\n Returns:\n - logging.Logger: a configured logging object\n \"\"\"\n+ logging.setLogRecordFactory(_log_record_context_injector)\n+\n name = \"prefect-test-logger\" if testing else \"prefect\"\n logger = logging.getLogger(name)\n handler = logging.StreamHandler(sys.stdout)\n@@ -194,6 +228,7 @@\n Returns:\n - logging.Logger: a configured logging object with the appropriate name\n \"\"\"\n+\n if name is None:\n return prefect_logger\n else:\n", "issue": "Add Flow and Task identifying information to LogRecords\n## Current behavior\r\n- All log entries have the default [LogRecord attributes](https://docs.python.org/3/library/logging.html#logrecord-objects)... \r\n- There is no universally identifiable flow slug\r\n\r\n## Proposed behavior\r\n...It would be useful to attach Core-specific information to log entries:\r\n- `task_name` & `task_slug`: available only on entries emitted from Tasks and TaskRunners\r\n- `flow_name` & `flow_slug`: available only on entries emitted from Flows and FlowRunners\r\n\r\nThe addition of the `flow_slug` makes is possible to uniquely identify flows other than by name.\r\n\r\n## Example\r\nOne can now take a single stream of log entries during a local flow run and separate it by entity without parsing the `LogRecord.msg` field (which is unreliable). 
This makes is possible to build richer feedback interfaces for filtering, organizing, and otherwise selectively manipulating log entries.\r\n\r\nHere is an example CLI interface that would be made possible by theses features:\r\n\r\n\r\n\r\n(Given the following flow):\r\n```python\r\nfrom utils import seed, random_data\r\nimport prefect\r\n\r\[email protected]\r\ndef extract():\r\n return seed.get()\r\n\r\[email protected]\r\ndef transform(seed):\r\n logger = prefect.context.get(\"logger\")\r\n data = []\r\n for x in range(10):\r\n c_data = random_data(seed)\r\n logger.info(\"debug info: {}\".format(c_data))\r\n data.append(c_data)\r\n return data\r\n\r\[email protected]\r\ndef load(data):\r\n # do something with the data....\r\n pass\r\n\r\nwith prefect.Flow(\"ETL\") as flow:\r\n e = extract()\r\n t = transform(e)\r\n load(t)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nUtility functions for interacting with and configuring logging. The main entrypoint for retrieving loggers for\ncustomization is the `get_logger` utility.\n\nNote that Prefect Tasks come equipped with their own loggers. These can be accessed via:\n - `self.logger` if implementing a Task class\n - `prefect.context.get(\"logger\")` if using the `task` decorator\n\nWhen running locally, log levels and message formatting are set via your Prefect configuration file.\n\"\"\"\nimport atexit\nimport json\nimport logging\nimport sys\nimport threading\nimport time\nfrom queue import Queue, Empty\nfrom typing import Any\n\nimport pendulum\n\nimport prefect\nfrom prefect.utilities.context import context\n\n\nclass CloudHandler(logging.StreamHandler):\n def __init__(self) -> None:\n super().__init__(sys.stdout)\n self.client = None\n self.logger = logging.getLogger(\"CloudHandler\")\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(context.config.logging.format)\n formatter.converter = time.gmtime # type: ignore\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.setLevel(context.config.logging.level)\n\n @property\n def queue(self) -> Queue:\n if not hasattr(self, \"_queue\"):\n self._queue = Queue() # type: Queue\n self._flush = False\n self.start()\n return self._queue\n\n def flush(self) -> None:\n self._flush = True\n if self.client is not None:\n self.batch_upload()\n self._thread.join()\n\n def batch_upload(self) -> None:\n logs = []\n try:\n while True:\n log = self.queue.get(False)\n logs.append(log)\n except Empty:\n pass\n\n if logs:\n try:\n assert self.client is not None\n self.client.write_run_logs(logs)\n except Exception as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n # Attempt to write batch error log otherwise log invalid cloud communication\n try:\n assert self.client is not None\n self.client.write_run_logs([self._make_error_log(message)])\n except Exception as exc:\n self.logger.critical(\"Unable to write logs to Prefect Cloud\")\n\n def _monitor(self) -> None:\n while not self._flush:\n self.batch_upload()\n time.sleep(self.heartbeat)\n\n def __del__(self) -> None:\n if hasattr(self, \"_thread\"):\n self.flush()\n atexit.unregister(self.flush)\n\n def start(self) -> None:\n if not hasattr(self, \"_thread\"):\n self.heartbeat = context.config.cloud.logging_heartbeat\n self._thread = t = threading.Thread(\n target=self._monitor, name=\"PrefectCloudLoggingThread\"\n )\n t.daemon = True\n t.start()\n atexit.register(self.flush)\n\n def put(self, log: dict) -> None:\n try:\n json.dumps(log) 
# make sure the payload is serializable\n self.queue.put(log)\n except TypeError as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n self.queue.put(self._make_error_log(message))\n\n def emit(self, record) -> None: # type: ignore\n # if we shouldn't log to cloud, don't emit\n if not prefect.context.config.logging.log_to_cloud:\n return\n\n try:\n from prefect.client import Client\n\n if self.client is None:\n self.client = Client() # type: ignore\n\n assert isinstance(self.client, Client) # mypy assert\n\n record_dict = record.__dict__.copy()\n log = dict()\n log[\"flowRunId\"] = prefect.context.get(\"flow_run_id\", None)\n log[\"taskRunId\"] = prefect.context.get(\"task_run_id\", None)\n log[\"timestamp\"] = pendulum.from_timestamp(\n record_dict.pop(\"created\", time.time())\n ).isoformat()\n log[\"name\"] = record_dict.pop(\"name\", None)\n log[\"message\"] = record_dict.pop(\"message\", None)\n log[\"level\"] = record_dict.pop(\"levelname\", None)\n\n if record_dict.get(\"exc_text\") is not None:\n log[\"message\"] += \"\\n\" + record_dict.pop(\"exc_text\", \"\")\n record_dict.pop(\"exc_info\", None)\n\n log[\"info\"] = record_dict\n self.put(log)\n except Exception as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n self.put(self._make_error_log(message))\n\n def _make_error_log(self, message: str) -> dict:\n log = dict()\n log[\"flowRunId\"] = prefect.context.get(\"flow_run_id\", None)\n log[\"timestamp\"] = pendulum.from_timestamp(time.time()).isoformat()\n log[\"name\"] = self.logger.name\n log[\"message\"] = message\n log[\"level\"] = \"CRITICAL\"\n log[\"info\"] = {}\n\n return log\n\n\ndef configure_logging(testing: bool = False) -> logging.Logger:\n \"\"\"\n Creates a \"prefect\" root logger with a `StreamHandler` that has level and formatting\n set from `prefect.config`.\n\n Args:\n - testing (bool, optional): a boolean specifying whether this configuration\n is for testing purposes only; this helps us isolate any global state during testing\n by configuring a \"prefect-test-logger\" instead of the standard \"prefect\" logger\n\n Returns:\n - logging.Logger: a configured logging object\n \"\"\"\n name = \"prefect-test-logger\" if testing else \"prefect\"\n logger = logging.getLogger(name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(context.config.logging.format)\n formatter.converter = time.gmtime # type: ignore\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(context.config.logging.level)\n\n cloud_handler = CloudHandler()\n cloud_handler.setLevel(\"DEBUG\")\n logger.addHandler(cloud_handler)\n return logger\n\n\nprefect_logger = configure_logging()\n\n\ndef get_logger(name: str = None) -> logging.Logger:\n \"\"\"\n Returns a \"prefect\" logger.\n\n Args:\n - name (str): if `None`, the root Prefect logger is returned. If provided, a child\n logger of the name `\"prefect.{name}\"` is returned. The child logger inherits\n the root logger's settings.\n\n Returns:\n - logging.Logger: a configured logging object with the appropriate name\n \"\"\"\n if name is None:\n return prefect_logger\n else:\n return prefect_logger.getChild(name)\n", "path": "src/prefect/utilities/logging.py"}], "after_files": [{"content": "\"\"\"\nUtility functions for interacting with and configuring logging. 
The main entrypoint for retrieving loggers for\ncustomization is the `get_logger` utility.\n\nNote that Prefect Tasks come equipped with their own loggers. These can be accessed via:\n - `self.logger` if implementing a Task class\n - `prefect.context.get(\"logger\")` if using the `task` decorator\n\nWhen running locally, log levels and message formatting are set via your Prefect configuration file.\n\"\"\"\nimport atexit\nimport json\nimport logging\nimport sys\nimport threading\nimport time\nfrom queue import Queue, Empty\nfrom typing import Any\n\nimport pendulum\n\nimport prefect\nfrom prefect.utilities.context import context\n\n\n_original_log_record_factory = logging.getLogRecordFactory()\n\nPREFECT_LOG_RECORD_ATTRIBUTES = (\n \"flow_name\",\n \"flow_run_id\",\n \"task_name\",\n \"task_slug\",\n \"task_run_id\",\n)\n\n\nclass CloudHandler(logging.StreamHandler):\n def __init__(self) -> None:\n super().__init__(sys.stdout)\n self.client = None\n self.logger = logging.getLogger(\"CloudHandler\")\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(context.config.logging.format)\n formatter.converter = time.gmtime # type: ignore\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.setLevel(context.config.logging.level)\n\n @property\n def queue(self) -> Queue:\n if not hasattr(self, \"_queue\"):\n self._queue = Queue() # type: Queue\n self._flush = False\n self.start()\n return self._queue\n\n def flush(self) -> None:\n self._flush = True\n if self.client is not None:\n self.batch_upload()\n self._thread.join()\n\n def batch_upload(self) -> None:\n logs = []\n try:\n while True:\n log = self.queue.get(False)\n logs.append(log)\n except Empty:\n pass\n\n if logs:\n try:\n assert self.client is not None\n self.client.write_run_logs(logs)\n except Exception as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n # Attempt to write batch error log otherwise log invalid cloud communication\n try:\n assert self.client is not None\n self.client.write_run_logs([self._make_error_log(message)])\n except Exception as exc:\n self.logger.critical(\"Unable to write logs to Prefect Cloud\")\n\n def _monitor(self) -> None:\n while not self._flush:\n self.batch_upload()\n time.sleep(self.heartbeat)\n\n def __del__(self) -> None:\n if hasattr(self, \"_thread\"):\n self.flush()\n atexit.unregister(self.flush)\n\n def start(self) -> None:\n if not hasattr(self, \"_thread\"):\n self.heartbeat = context.config.cloud.logging_heartbeat\n self._thread = t = threading.Thread(\n target=self._monitor, name=\"PrefectCloudLoggingThread\"\n )\n t.daemon = True\n t.start()\n atexit.register(self.flush)\n\n def put(self, log: dict) -> None:\n try:\n json.dumps(log) # make sure the payload is serializable\n self.queue.put(log)\n except TypeError as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n self.queue.put(self._make_error_log(message))\n\n def emit(self, record) -> None: # type: ignore\n # if we shouldn't log to cloud, don't emit\n if not prefect.context.config.logging.log_to_cloud:\n return\n\n try:\n from prefect.client import Client\n\n if self.client is None:\n self.client = Client() # type: ignore\n\n assert isinstance(self.client, Client) # mypy assert\n\n record_dict = record.__dict__.copy()\n log = dict()\n log[\"flowRunId\"] = prefect.context.get(\"flow_run_id\", None)\n log[\"taskRunId\"] = prefect.context.get(\"task_run_id\", None)\n 
log[\"timestamp\"] = pendulum.from_timestamp(\n record_dict.pop(\"created\", time.time())\n ).isoformat()\n log[\"name\"] = record_dict.pop(\"name\", None)\n log[\"message\"] = record_dict.pop(\"message\", None)\n log[\"level\"] = record_dict.pop(\"levelname\", None)\n\n if record_dict.get(\"exc_text\") is not None:\n log[\"message\"] += \"\\n\" + record_dict.pop(\"exc_text\", \"\")\n record_dict.pop(\"exc_info\", None)\n\n log[\"info\"] = record_dict\n self.put(log)\n except Exception as exc:\n message = \"Failed to write log with error: {}\".format(str(exc))\n self.logger.critical(message)\n\n self.put(self._make_error_log(message))\n\n def _make_error_log(self, message: str) -> dict:\n log = dict()\n log[\"flowRunId\"] = prefect.context.get(\"flow_run_id\", None)\n log[\"timestamp\"] = pendulum.from_timestamp(time.time()).isoformat()\n log[\"name\"] = self.logger.name\n log[\"message\"] = message\n log[\"level\"] = \"CRITICAL\"\n log[\"info\"] = {}\n\n return log\n\n\ndef _log_record_context_injector(*args: Any, **kwargs: Any) -> logging.LogRecord:\n \"\"\"\n A custom logger LogRecord Factory that injects selected context parameters into newly created logs.\n\n Args:\n - *args: arguments to pass to the original LogRecord Factory\n - **kwargs: keyword arguments to pass to the original LogRecord Factory\n\n Returns:\n - logging.LogRecord: the newly created LogRecord\n \"\"\"\n record = _original_log_record_factory(*args, **kwargs)\n\n for attr in PREFECT_LOG_RECORD_ATTRIBUTES:\n value = prefect.context.get(attr, None)\n if value:\n setattr(record, attr, value)\n\n return record\n\n\ndef configure_logging(testing: bool = False) -> logging.Logger:\n \"\"\"\n Creates a \"prefect\" root logger with a `StreamHandler` that has level and formatting\n set from `prefect.config`.\n\n Args:\n - testing (bool, optional): a boolean specifying whether this configuration\n is for testing purposes only; this helps us isolate any global state during testing\n by configuring a \"prefect-test-logger\" instead of the standard \"prefect\" logger\n\n Returns:\n - logging.Logger: a configured logging object\n \"\"\"\n logging.setLogRecordFactory(_log_record_context_injector)\n\n name = \"prefect-test-logger\" if testing else \"prefect\"\n logger = logging.getLogger(name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(context.config.logging.format)\n formatter.converter = time.gmtime # type: ignore\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(context.config.logging.level)\n\n cloud_handler = CloudHandler()\n cloud_handler.setLevel(\"DEBUG\")\n logger.addHandler(cloud_handler)\n return logger\n\n\nprefect_logger = configure_logging()\n\n\ndef get_logger(name: str = None) -> logging.Logger:\n \"\"\"\n Returns a \"prefect\" logger.\n\n Args:\n - name (str): if `None`, the root Prefect logger is returned. If provided, a child\n logger of the name `\"prefect.{name}\"` is returned. The child logger inherits\n the root logger's settings.\n\n Returns:\n - logging.Logger: a configured logging object with the appropriate name\n \"\"\"\n\n if name is None:\n return prefect_logger\n else:\n return prefect_logger.getChild(name)\n", "path": "src/prefect/utilities/logging.py"}]} | 2,623 | 495 |
gh_patches_debug_34108 | rasdani/github-patches | git_diff | pymedusa__Medusa-9720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postprocessing exception on Prowl notifications
Since updating to the latest commit earlier today, postprocessing throws an exception once it gets to the point of sending Prowl notifications (error below). Actual postprocessing seems to go fine--e.g., episode shows up in the db properly, etc. Never had issues with Prowl notifications before today. Tested two different shows and got the same error.
ETA: I actually turned Prowl notifications off and got this same error postprocessing another ep so... maybe it's not related to Prowl? Was just assuming because it was throwing something in prowl.py.
**To Reproduce**
Steps to reproduce the behavior:
1. Turn on Prowl notifications.
2. Postprocess an episode.
3. See error.
**Expected behavior**
I expect to get a Prowl notification that the download is complete and for Medusa not to throw any errors.
**Medusa (please complete the following information):**
- OS: Linux-3.2.40-i686-with-glibc2.0 (on a Synology NAS)
- Branch: master
- Commit: db102f3561204d388c3f4cbf6e89c79282ecd12f
- Python version: 3.7.10
- Database version: 44.18
<details>
2021-07-06 22:48:45 ERROR TORNADO :: [db102f3] API :: AttributeError("'list' object has no attribute 'series_id'")
Traceback (most recent call last):
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py", line 165, in get
out_dict = _call_dispatcher(args, kwargs)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py", line 235, in call_dispatcher
cur_out_dict = func(cur_args, cur_kwargs).run() # call function and get response
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py", line 1336, in run
proc_type=self.type
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py", line 207, in run
process_results.process(force=force, **kwargs)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py", line 355, in process
ignore_subs=ignore_subs)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py", line 627, in process_files
self.process_media(path, self.video_files, force, is_priority, ignore_subs)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py", line 849, in process_media
self.result = processor.process()
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/post_processor.py", line 1298, in process
notifiers.notify_download(ep_obj)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/__init__.py", line 103, in notify_download
n.notify_download(ep_obj)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/prowl.py", line 48, in notify_download
recipients = self._generate_recipients(show)
File "/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/prowl.py", line 107, in _generate_recipients
[show_obj.series_id, show_obj.indexer]
AttributeError: 'list' object has no attribute 'series_id'
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/notifiers/prowl.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import json
6 import logging
7 import socket
8 from builtins import object
9
10 from medusa import app, common, db
11 from medusa.logger.adapters.style import BraceAdapter
12
13 from requests.compat import urlencode
14
15 from six.moves.http_client import HTTPException, HTTPSConnection
16
17 try:
18 # this only exists in 2.6
19 from ssl import SSLError
20 except ImportError:
21 # make a fake one since I don't know what it is supposed to be in 2.5
22 class SSLError(Exception):
23 pass
24
25 log = BraceAdapter(logging.getLogger(__name__))
26 log.logger.addHandler(logging.NullHandler())
27
28
29 class Notifier(object):
30 def test_notify(self, prowl_api, prowl_priority):
31 return self._send_prowl(prowl_api, prowl_priority, event='Test', message='Testing Prowl settings from Medusa', force=True)
32
33 def notify_snatch(self, title, message, ep_obj):
34 if app.PROWL_NOTIFY_ONSNATCH:
35 recipients = self._generate_recipients(ep_obj.series)
36 if not recipients:
37 log.debug('Skipping prowl notify because there are no configured recipients')
38 else:
39 for api in recipients:
40 self._send_prowl(prowl_api=api, prowl_priority=None,
41 event=title,
42 message=message)
43
44 def notify_download(self, ep_obj):
45 ep_name = ep_obj.pretty_name_with_quality()
46 if app.PROWL_NOTIFY_ONDOWNLOAD:
47 show = self._parse_episode(ep_name)
48 recipients = self._generate_recipients(show)
49 if not recipients:
50 log.debug('Skipping prowl notify because there are no configured recipients')
51 else:
52 for api in recipients:
53 self._send_prowl(prowl_api=api, prowl_priority=None,
54 event=common.notifyStrings[common.NOTIFY_DOWNLOAD],
55 message=ep_name)
56
57 def notify_subtitle_download(self, ep_obj, lang):
58 ep_name = ep_obj.pretty_name()
59 if app.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
60 show = self._parse_episode(ep_name)
61 recipients = self._generate_recipients(show)
62 if not recipients:
63 log.debug('Skipping prowl notify because there are no configured recipients')
64 else:
65 for api in recipients:
66 self._send_prowl(prowl_api=api, prowl_priority=None,
67 event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],
68 message=ep_name + ' [' + lang + ']')
69
70 def notify_git_update(self, new_version='??'):
71 if app.USE_PROWL:
72 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
73 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
74 self._send_prowl(prowl_api=None, prowl_priority=None,
75 event=title, message=update_text + new_version)
76
77 def notify_login(self, ipaddress=''):
78 if app.USE_PROWL:
79 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
80 title = common.notifyStrings[common.NOTIFY_LOGIN]
81 self._send_prowl(prowl_api=None, prowl_priority=None,
82 event=title, message=update_text.format(ipaddress))
83
84 @staticmethod
85 def _generate_recipients(show_obj=None):
86 """
87 Generate a list of prowl recipients (api keys) for a specific show.
88
89 Search the tv_shows table for entries in the notify_list field.
90 :param show_obj: Show object.
91 """
92 apis = []
93 mydb = db.DBConnection()
94
95 # Grab the global recipient(s)
96 if app.PROWL_API:
97 for api in app.PROWL_API:
98 if api.strip():
99 apis.append(api)
100
101 # Grab the per-show-notification recipients
102 if show_obj is not None:
103 recipients = mydb.select(
104 'SELECT notify_list '
105 'FROM tv_shows '
106 'WHERE indexer_id = ? AND indexer = ? ',
107 [show_obj.series_id, show_obj.indexer]
108 )
109
110 for subs in recipients:
111 if subs['notify_list']:
112 entries = json.loads(subs['notify_list'])
113 if entries:
114 for api in entries['prowlAPIs'].split(','):
115 if api.strip():
116 apis.append(api)
117
118 apis = set(apis)
119 return apis
120
121 @staticmethod
122 def _send_prowl(prowl_api=None, prowl_priority=None, event=None, message=None, force=False):
123
124 if not app.USE_PROWL and not force:
125 return False
126
127 if prowl_api is None:
128 prowl_api = ','.join(app.PROWL_API)
129 if not prowl_api:
130 return False
131
132 if prowl_priority is None:
133 prowl_priority = app.PROWL_PRIORITY
134
135 title = app.PROWL_MESSAGE_TITLE
136
137 log.debug(u'PROWL: Sending notice with details: title="{0}" event="{1}", message="{2}", priority={3}, api={4}',
138 title, event, message, prowl_priority, prowl_api)
139
140 http_handler = HTTPSConnection('api.prowlapp.com')
141
142 data = {'apikey': prowl_api,
143 'application': title,
144 'event': event,
145 'description': message.encode('utf-8'),
146 'priority': prowl_priority}
147
148 try:
149 http_handler.request('POST',
150 '/publicapi/add',
151 headers={'Content-type': 'application/x-www-form-urlencoded'},
152 body=urlencode(data))
153 except (SSLError, HTTPException, socket.error):
154 log.error(u'Prowl notification failed.')
155 return False
156 response = http_handler.getresponse()
157 request_status = response.status
158
159 if request_status == 200:
160 log.info(u'Prowl notifications sent.')
161 return True
162 elif request_status == 401:
163 log.error(u'Prowl auth failed: {0}', response.reason)
164 return False
165 else:
166 log.error(u'Prowl notification failed.')
167 return False
168
169 @staticmethod
170 def _parse_episode(ep_name):
171 sep = ' - '
172 titles = ep_name.split(sep)
173 titles.sort(key=len, reverse=True)
174 log.debug('TITLES: {0}', titles)
175
176 return titles
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/notifiers/prowl.py b/medusa/notifiers/prowl.py
--- a/medusa/notifiers/prowl.py
+++ b/medusa/notifiers/prowl.py
@@ -42,30 +42,26 @@
message=message)
def notify_download(self, ep_obj):
- ep_name = ep_obj.pretty_name_with_quality()
if app.PROWL_NOTIFY_ONDOWNLOAD:
- show = self._parse_episode(ep_name)
- recipients = self._generate_recipients(show)
+ recipients = self._generate_recipients(ep_obj.series)
if not recipients:
log.debug('Skipping prowl notify because there are no configured recipients')
else:
for api in recipients:
self._send_prowl(prowl_api=api, prowl_priority=None,
event=common.notifyStrings[common.NOTIFY_DOWNLOAD],
- message=ep_name)
+ message=ep_obj.pretty_name_with_quality())
def notify_subtitle_download(self, ep_obj, lang):
- ep_name = ep_obj.pretty_name()
if app.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
- show = self._parse_episode(ep_name)
- recipients = self._generate_recipients(show)
+ recipients = self._generate_recipients(ep_obj.series)
if not recipients:
log.debug('Skipping prowl notify because there are no configured recipients')
else:
for api in recipients:
self._send_prowl(prowl_api=api, prowl_priority=None,
event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],
- message=ep_name + ' [' + lang + ']')
+ message=f'{ep_obj.pretty_name()} [{lang}]')
def notify_git_update(self, new_version='??'):
if app.USE_PROWL:
| {"golden_diff": "diff --git a/medusa/notifiers/prowl.py b/medusa/notifiers/prowl.py\n--- a/medusa/notifiers/prowl.py\n+++ b/medusa/notifiers/prowl.py\n@@ -42,30 +42,26 @@\n message=message)\n \n def notify_download(self, ep_obj):\n- ep_name = ep_obj.pretty_name_with_quality()\n if app.PROWL_NOTIFY_ONDOWNLOAD:\n- show = self._parse_episode(ep_name)\n- recipients = self._generate_recipients(show)\n+ recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n- message=ep_name)\n+ message=ep_obj.pretty_name_with_quality())\n \n def notify_subtitle_download(self, ep_obj, lang):\n- ep_name = ep_obj.pretty_name()\n if app.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n- show = self._parse_episode(ep_name)\n- recipients = self._generate_recipients(show)\n+ recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n- message=ep_name + ' [' + lang + ']')\n+ message=f'{ep_obj.pretty_name()} [{lang}]')\n \n def notify_git_update(self, new_version='??'):\n if app.USE_PROWL:\n", "issue": "Postprocessing exception on Prowl notifications\nSince updating to the latest commit earlier today, postprocessing throws an exception once it gets to the point of sending Prowl notifications (error below). Actual postprocessing seems to go fine--e.g., episode shows up in the db properly, etc. Never had issues with Prowl notifications before today. Tested two different shows and got the same error.\r\n\r\nETA: I actually turned Prowl notifications off and got this same error postprocessing another ep so... maybe it's not related to Prowl? Was just assuming because it was throwing something in prowl.py. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Turn on Prowl notifications.\r\n2. Postprocess an episode.\r\n3. 
See error.\r\n\r\n**Expected behavior**\r\nI expect to get a Prowl notification that the download is complete and for Medusa not to throw any errors.\r\n\r\n\r\n**Medusa (please complete the following information):**\r\n - OS: Linux-3.2.40-i686-with-glibc2.0 (on a Synology NAS)\r\n - Branch: master\r\n - Commit: db102f3561204d388c3f4cbf6e89c79282ecd12f\r\n - Python version: 3.7.10\r\n - Database version: 44.18\r\n\r\n<details>\r\n\r\n2021-07-06 22:48:45 ERROR TORNADO :: [db102f3] API :: AttributeError(\"'list' object has no attribute 'series_id'\")\r\nTraceback (most recent call last):\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py\", line 165, in get\r\n out_dict = _call_dispatcher(args, kwargs)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py\", line 235, in call_dispatcher\r\n cur_out_dict = func(cur_args, cur_kwargs).run() # call function and get response\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/server/api/v1/core.py\", line 1336, in run\r\n proc_type=self.type\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py\", line 207, in run\r\n process_results.process(force=force, **kwargs)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py\", line 355, in process\r\n ignore_subs=ignore_subs)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py\", line 627, in process_files\r\n self.process_media(path, self.video_files, force, is_priority, ignore_subs)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/process_tv.py\", line 849, in process_media\r\n self.result = processor.process()\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/post_processor.py\", line 1298, in process\r\n notifiers.notify_download(ep_obj)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/__init__.py\", line 103, in notify_download\r\n n.notify_download(ep_obj)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/prowl.py\", line 48, in notify_download\r\n recipients = self._generate_recipients(show)\r\n File \"/volume1/@appstore/sickbeard-custom/var/SickBeard/medusa/notifiers/prowl.py\", line 107, in _generate_recipients\r\n [show_obj.series_id, show_obj.indexer]\r\nAttributeError: 'list' object has no attribute 'series_id'\r\n</details>\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport socket\nfrom builtins import object\n\nfrom medusa import app, common, db\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom requests.compat import urlencode\n\nfrom six.moves.http_client import HTTPException, HTTPSConnection\n\ntry:\n # this only exists in 2.6\n from ssl import SSLError\nexcept ImportError:\n # make a fake one since I don't know what it is supposed to be in 2.5\n class SSLError(Exception):\n pass\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, prowl_api, prowl_priority):\n return self._send_prowl(prowl_api, prowl_priority, event='Test', message='Testing Prowl settings from Medusa', force=True)\n\n def notify_snatch(self, title, message, ep_obj):\n if app.PROWL_NOTIFY_ONSNATCH:\n recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n 
else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=title,\n message=message)\n\n def notify_download(self, ep_obj):\n ep_name = ep_obj.pretty_name_with_quality()\n if app.PROWL_NOTIFY_ONDOWNLOAD:\n show = self._parse_episode(ep_name)\n recipients = self._generate_recipients(show)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n message=ep_name)\n\n def notify_subtitle_download(self, ep_obj, lang):\n ep_name = ep_obj.pretty_name()\n if app.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n show = self._parse_episode(ep_name)\n recipients = self._generate_recipients(show)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n message=ep_name + ' [' + lang + ']')\n\n def notify_git_update(self, new_version='??'):\n if app.USE_PROWL:\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._send_prowl(prowl_api=None, prowl_priority=None,\n event=title, message=update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_PROWL:\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._send_prowl(prowl_api=None, prowl_priority=None,\n event=title, message=update_text.format(ipaddress))\n\n @staticmethod\n def _generate_recipients(show_obj=None):\n \"\"\"\n Generate a list of prowl recipients (api keys) for a specific show.\n\n Search the tv_shows table for entries in the notify_list field.\n :param show_obj: Show object.\n \"\"\"\n apis = []\n mydb = db.DBConnection()\n\n # Grab the global recipient(s)\n if app.PROWL_API:\n for api in app.PROWL_API:\n if api.strip():\n apis.append(api)\n\n # Grab the per-show-notification recipients\n if show_obj is not None:\n recipients = mydb.select(\n 'SELECT notify_list '\n 'FROM tv_shows '\n 'WHERE indexer_id = ? AND indexer = ? 
',\n [show_obj.series_id, show_obj.indexer]\n )\n\n for subs in recipients:\n if subs['notify_list']:\n entries = json.loads(subs['notify_list'])\n if entries:\n for api in entries['prowlAPIs'].split(','):\n if api.strip():\n apis.append(api)\n\n apis = set(apis)\n return apis\n\n @staticmethod\n def _send_prowl(prowl_api=None, prowl_priority=None, event=None, message=None, force=False):\n\n if not app.USE_PROWL and not force:\n return False\n\n if prowl_api is None:\n prowl_api = ','.join(app.PROWL_API)\n if not prowl_api:\n return False\n\n if prowl_priority is None:\n prowl_priority = app.PROWL_PRIORITY\n\n title = app.PROWL_MESSAGE_TITLE\n\n log.debug(u'PROWL: Sending notice with details: title=\"{0}\" event=\"{1}\", message=\"{2}\", priority={3}, api={4}',\n title, event, message, prowl_priority, prowl_api)\n\n http_handler = HTTPSConnection('api.prowlapp.com')\n\n data = {'apikey': prowl_api,\n 'application': title,\n 'event': event,\n 'description': message.encode('utf-8'),\n 'priority': prowl_priority}\n\n try:\n http_handler.request('POST',\n '/publicapi/add',\n headers={'Content-type': 'application/x-www-form-urlencoded'},\n body=urlencode(data))\n except (SSLError, HTTPException, socket.error):\n log.error(u'Prowl notification failed.')\n return False\n response = http_handler.getresponse()\n request_status = response.status\n\n if request_status == 200:\n log.info(u'Prowl notifications sent.')\n return True\n elif request_status == 401:\n log.error(u'Prowl auth failed: {0}', response.reason)\n return False\n else:\n log.error(u'Prowl notification failed.')\n return False\n\n @staticmethod\n def _parse_episode(ep_name):\n sep = ' - '\n titles = ep_name.split(sep)\n titles.sort(key=len, reverse=True)\n log.debug('TITLES: {0}', titles)\n\n return titles\n", "path": "medusa/notifiers/prowl.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport socket\nfrom builtins import object\n\nfrom medusa import app, common, db\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom requests.compat import urlencode\n\nfrom six.moves.http_client import HTTPException, HTTPSConnection\n\ntry:\n # this only exists in 2.6\n from ssl import SSLError\nexcept ImportError:\n # make a fake one since I don't know what it is supposed to be in 2.5\n class SSLError(Exception):\n pass\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, prowl_api, prowl_priority):\n return self._send_prowl(prowl_api, prowl_priority, event='Test', message='Testing Prowl settings from Medusa', force=True)\n\n def notify_snatch(self, title, message, ep_obj):\n if app.PROWL_NOTIFY_ONSNATCH:\n recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=title,\n message=message)\n\n def notify_download(self, ep_obj):\n if app.PROWL_NOTIFY_ONDOWNLOAD:\n recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_DOWNLOAD],\n message=ep_obj.pretty_name_with_quality())\n\n def notify_subtitle_download(self, ep_obj, lang):\n if 
app.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n recipients = self._generate_recipients(ep_obj.series)\n if not recipients:\n log.debug('Skipping prowl notify because there are no configured recipients')\n else:\n for api in recipients:\n self._send_prowl(prowl_api=api, prowl_priority=None,\n event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],\n message=f'{ep_obj.pretty_name()} [{lang}]')\n\n def notify_git_update(self, new_version='??'):\n if app.USE_PROWL:\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._send_prowl(prowl_api=None, prowl_priority=None,\n event=title, message=update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_PROWL:\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._send_prowl(prowl_api=None, prowl_priority=None,\n event=title, message=update_text.format(ipaddress))\n\n @staticmethod\n def _generate_recipients(show_obj=None):\n \"\"\"\n Generate a list of prowl recipients (api keys) for a specific show.\n\n Search the tv_shows table for entries in the notify_list field.\n :param show_obj: Show object.\n \"\"\"\n apis = []\n mydb = db.DBConnection()\n\n # Grab the global recipient(s)\n if app.PROWL_API:\n for api in app.PROWL_API:\n if api.strip():\n apis.append(api)\n\n # Grab the per-show-notification recipients\n if show_obj is not None:\n recipients = mydb.select(\n 'SELECT notify_list '\n 'FROM tv_shows '\n 'WHERE indexer_id = ? AND indexer = ? ',\n [show_obj.series_id, show_obj.indexer]\n )\n\n for subs in recipients:\n if subs['notify_list']:\n entries = json.loads(subs['notify_list'])\n if entries:\n for api in entries['prowlAPIs'].split(','):\n if api.strip():\n apis.append(api)\n\n apis = set(apis)\n return apis\n\n @staticmethod\n def _send_prowl(prowl_api=None, prowl_priority=None, event=None, message=None, force=False):\n\n if not app.USE_PROWL and not force:\n return False\n\n if prowl_api is None:\n prowl_api = ','.join(app.PROWL_API)\n if not prowl_api:\n return False\n\n if prowl_priority is None:\n prowl_priority = app.PROWL_PRIORITY\n\n title = app.PROWL_MESSAGE_TITLE\n\n log.debug(u'PROWL: Sending notice with details: title=\"{0}\" event=\"{1}\", message=\"{2}\", priority={3}, api={4}',\n title, event, message, prowl_priority, prowl_api)\n\n http_handler = HTTPSConnection('api.prowlapp.com')\n\n data = {'apikey': prowl_api,\n 'application': title,\n 'event': event,\n 'description': message.encode('utf-8'),\n 'priority': prowl_priority}\n\n try:\n http_handler.request('POST',\n '/publicapi/add',\n headers={'Content-type': 'application/x-www-form-urlencoded'},\n body=urlencode(data))\n except (SSLError, HTTPException, socket.error):\n log.error(u'Prowl notification failed.')\n return False\n response = http_handler.getresponse()\n request_status = response.status\n\n if request_status == 200:\n log.info(u'Prowl notifications sent.')\n return True\n elif request_status == 401:\n log.error(u'Prowl auth failed: {0}', response.reason)\n return False\n else:\n log.error(u'Prowl notification failed.')\n return False\n\n @staticmethod\n def _parse_episode(ep_name):\n sep = ' - '\n titles = ep_name.split(sep)\n titles.sort(key=len, reverse=True)\n log.debug('TITLES: {0}', titles)\n\n return titles\n", "path": "medusa/notifiers/prowl.py"}]} | 2,943 | 395 |
gh_patches_debug_10629 | rasdani/github-patches | git_diff | jumpserver__jumpserver-138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
change(version num) change jumpserver version
from 0.3.0 -> 0.3.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jperm/models.py`
Content:
```
1 import datetime
2
3 from django.db import models
4 from jasset.models import Asset, AssetGroup
5 from juser.models import User, UserGroup
6
7
8 class PermLog(models.Model):
9 datetime = models.DateTimeField(auto_now_add=True)
10 action = models.CharField(max_length=100, null=True, blank=True, default='')
11 results = models.CharField(max_length=1000, null=True, blank=True, default='')
12 is_success = models.BooleanField(default=False)
13 is_finish = models.BooleanField(default=False)
14
15
16 class PermSudo(models.Model):
17 name = models.CharField(max_length=100, unique=True)
18 date_added = models.DateTimeField(auto_now=True)
19 commands = models.TextField()
20 comment = models.CharField(max_length=100, null=True, blank=True, default='')
21
22 def __unicode__(self):
23 return self.name
24
25
26 class PermRole(models.Model):
27 name = models.CharField(max_length=100, unique=True)
28 comment = models.CharField(max_length=100, null=True, blank=True, default='')
29 password = models.CharField(max_length=100)
30 key_path = models.CharField(max_length=100)
31 date_added = models.DateTimeField(auto_now=True)
32 sudo = models.ManyToManyField(PermSudo, related_name='perm_role')
33
34 def __unicode__(self):
35 return self.name
36
37
38 class PermRule(models.Model):
39 date_added = models.DateTimeField(auto_now=True)
40 name = models.CharField(max_length=100, unique=True)
41 comment = models.CharField(max_length=100)
42 asset = models.ManyToManyField(Asset, related_name='perm_rule')
43 asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule')
44 user = models.ManyToManyField(User, related_name='perm_rule')
45 user_group = models.ManyToManyField(UserGroup, related_name='perm_rule')
46 role = models.ManyToManyField(PermRole, related_name='perm_rule')
47
48 def __unicode__(self):
49 return self.name
50
51
52 class PermPush(models.Model):
53 asset = models.ForeignKey(Asset, related_name='perm_push')
54 role = models.ForeignKey(PermRole, related_name='perm_push')
55 is_public_key = models.BooleanField(default=False)
56 is_password = models.BooleanField(default=False)
57 success = models.BooleanField(default=False)
58 result = models.TextField(default='')
59 date_added = models.DateTimeField(auto_now=True)
60
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jperm/models.py b/jperm/models.py
--- a/jperm/models.py
+++ b/jperm/models.py
@@ -26,7 +26,7 @@
class PermRole(models.Model):
name = models.CharField(max_length=100, unique=True)
comment = models.CharField(max_length=100, null=True, blank=True, default='')
- password = models.CharField(max_length=100)
+ password = models.CharField(max_length=128)
key_path = models.CharField(max_length=100)
date_added = models.DateTimeField(auto_now=True)
sudo = models.ManyToManyField(PermSudo, related_name='perm_role')
| {"golden_diff": "diff --git a/jperm/models.py b/jperm/models.py\n--- a/jperm/models.py\n+++ b/jperm/models.py\n@@ -26,7 +26,7 @@\n class PermRole(models.Model):\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n- password = models.CharField(max_length=100)\n+ password = models.CharField(max_length=128)\n key_path = models.CharField(max_length=100)\n date_added = models.DateTimeField(auto_now=True)\n sudo = models.ManyToManyField(PermSudo, related_name='perm_role')\n", "issue": "change(version num) change jumpserver version\nfrom 0.3.0 -> 0.3.1\n\n", "before_files": [{"content": "import datetime\n\nfrom django.db import models\nfrom jasset.models import Asset, AssetGroup\nfrom juser.models import User, UserGroup\n\n\nclass PermLog(models.Model):\n datetime = models.DateTimeField(auto_now_add=True)\n action = models.CharField(max_length=100, null=True, blank=True, default='')\n results = models.CharField(max_length=1000, null=True, blank=True, default='')\n is_success = models.BooleanField(default=False)\n is_finish = models.BooleanField(default=False)\n\n\nclass PermSudo(models.Model):\n name = models.CharField(max_length=100, unique=True)\n date_added = models.DateTimeField(auto_now=True)\n commands = models.TextField()\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRole(models.Model):\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n password = models.CharField(max_length=100)\n key_path = models.CharField(max_length=100)\n date_added = models.DateTimeField(auto_now=True)\n sudo = models.ManyToManyField(PermSudo, related_name='perm_role')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRule(models.Model):\n date_added = models.DateTimeField(auto_now=True)\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100)\n asset = models.ManyToManyField(Asset, related_name='perm_rule')\n asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule')\n user = models.ManyToManyField(User, related_name='perm_rule')\n user_group = models.ManyToManyField(UserGroup, related_name='perm_rule')\n role = models.ManyToManyField(PermRole, related_name='perm_rule')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermPush(models.Model):\n asset = models.ForeignKey(Asset, related_name='perm_push')\n role = models.ForeignKey(PermRole, related_name='perm_push')\n is_public_key = models.BooleanField(default=False)\n is_password = models.BooleanField(default=False)\n success = models.BooleanField(default=False)\n result = models.TextField(default='')\n date_added = models.DateTimeField(auto_now=True)\n\n", "path": "jperm/models.py"}], "after_files": [{"content": "import datetime\n\nfrom django.db import models\nfrom jasset.models import Asset, AssetGroup\nfrom juser.models import User, UserGroup\n\n\nclass PermLog(models.Model):\n datetime = models.DateTimeField(auto_now_add=True)\n action = models.CharField(max_length=100, null=True, blank=True, default='')\n results = models.CharField(max_length=1000, null=True, blank=True, default='')\n is_success = models.BooleanField(default=False)\n is_finish = models.BooleanField(default=False)\n\n\nclass PermSudo(models.Model):\n name = models.CharField(max_length=100, unique=True)\n date_added = models.DateTimeField(auto_now=True)\n 
commands = models.TextField()\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRole(models.Model):\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100, null=True, blank=True, default='')\n password = models.CharField(max_length=128)\n key_path = models.CharField(max_length=100)\n date_added = models.DateTimeField(auto_now=True)\n sudo = models.ManyToManyField(PermSudo, related_name='perm_role')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermRule(models.Model):\n date_added = models.DateTimeField(auto_now=True)\n name = models.CharField(max_length=100, unique=True)\n comment = models.CharField(max_length=100)\n asset = models.ManyToManyField(Asset, related_name='perm_rule')\n asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule')\n user = models.ManyToManyField(User, related_name='perm_rule')\n user_group = models.ManyToManyField(UserGroup, related_name='perm_rule')\n role = models.ManyToManyField(PermRole, related_name='perm_rule')\n\n def __unicode__(self):\n return self.name\n\n\nclass PermPush(models.Model):\n asset = models.ForeignKey(Asset, related_name='perm_push')\n role = models.ForeignKey(PermRole, related_name='perm_push')\n is_public_key = models.BooleanField(default=False)\n is_password = models.BooleanField(default=False)\n success = models.BooleanField(default=False)\n result = models.TextField(default='')\n date_added = models.DateTimeField(auto_now=True)\n\n", "path": "jperm/models.py"}]} | 895 | 146 |
gh_patches_debug_25710 | rasdani/github-patches | git_diff | spacetelescope__jwql-991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image view for segmented files is not working
e.g. https://jwql.stsci.edu/NIRCam/jw01442001001_04103_00001-seg004_nrca3/
This is giving a "Page not found" error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/website/apps/jwql/urls.py`
Content:
```
1 """Maps URL paths to views in the ``jwql`` app.
2
3 This module connects requested URL paths to the corresponding view in
4 ``views.py`` for each webpage in the ``jwql`` app. When django is
5 provided a path, it searches through the ``urlpatterns`` list provided
6 here until it finds one that matches. It then calls the assigned view
7 to load the appropriate webpage, passing an ``HttpRequest`` object.
8
9 Authors
10 -------
11
12 - Lauren Chambers
13 - Matthew Bourque
14 - Johannes Sahlmann
15 - Teagan King
16
17 Use
18 ---
19
20 Function views
21 1. Add an import: from my_app import views
22 2. Add a URL to urlpatterns: path('', views.home, name='home')
23 Class-based views
24 1. Add an import: from other_app.views import Home
25 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
26 Including another URLconf
27 1. Import the include() function: from django.urls import include, path
28 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
29
30 References
31 ----------
32 For more information please see:
33 ``https://docs.djangoproject.com/en/2.0/topics/http/urls/``
34
35 Notes
36 -----
37 Be aware that when a url is requested, it will be directed to the
38 first matching path in the ``urlpatterns`` list that it finds. The
39 ``<str:var>`` tag is just a placeholder. To avoid complications,
40 users should order their paths in order from shortest to longest,
41 and after that from most to least specific.
42 """
43
44 from django.urls import path
45 from django.urls import re_path
46
47 from . import api_views
48 from . import monitor_views
49 from . import views
50
51 app_name = 'jwql'
52 instruments = 'nircam|NIRCam|niriss|NIRISS|nirspec|NIRSpec|miri|MIRI|fgs|FGS'
53
54 urlpatterns = [
55
56 # Home
57 path('', views.home, name='home'),
58
59 # MIRI-specific views
60 path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'),
61
62 # NIRSpec-specific views
63 path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'),
64
65 # Common monitor views
66 re_path(r'^(?P<inst>({}))/dark_monitor/$'.format(instruments), monitor_views.dark_monitor, name='dark_monitor'),
67 re_path(r'^(?P<inst>({}))/bad_pixel_monitor/$'.format(instruments), monitor_views.bad_pixel_monitor, name='bad_pixel_monitor'),
68 re_path(r'^(?P<inst>({}))/bias_monitor/$'.format(instruments), monitor_views.bias_monitor, name='bias_monitor'),
69 re_path(r'^(?P<inst>({}))/readnoise_monitor/$'.format(instruments), monitor_views.readnoise_monitor, name='readnoise_monitor'),
70
71 # Main site views
72 path('about/', views.about, name='about'),
73 path('anomaly_query/', views.anomaly_query, name='anomaly_query'),
74 path('api/', views.api_landing, name='api'),
75 path('dashboard/', views.dashboard, name='dashboard'),
76 path('download_table/<str:tablename>', views.export, name='download_table'),
77 path('edb/', views.engineering_database, name='edb'),
78 path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb'),
79 path('jwqldb/<str:tablename_param>', views.jwqldb_table_viewer, name='jwqldb_table_viewer'),
80 path('query_submit/', views.query_submit, name='query_submit'),
81 re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),
82 re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),
83 re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),
84 re_path(r'^(?P<inst>({}))/(?P<file_root>[\w]+)/$'.format(instruments), views.view_image, name='view_image'),
85 re_path(r'^(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'),
86 re_path(r'^(?P<inst>({}))/(?P<filename>.+)_(?P<filetype>.+)/header/'.format(instruments), views.view_header, name='view_header'),
87 re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\d]{{1,5}})/obs(?P<observation>[\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'),
88
89 # AJAX views
90 re_path('ajax/query_submit/', views.archive_thumbnails_query_ajax, name='archive_thumb_query_ajax'),
91 re_path(r'^ajax/(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals_ajax, name='archive_ajax'),
92 re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),
93 re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image_(?P<scaling>.+)_(?P<low_lim>.+)_(?P<high_lim>.+)/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),
94 re_path(r'^ajax/(?P<inst>({}))/archive/(?P<proposal>[\d]{{1,5}})/obs(?P<observation>[\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'),
95
96 # REST API views
97 path('api/proposals/', api_views.all_proposals, name='all_proposals'),
98 re_path(r'^api/(?P<inst>({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'),
99 re_path(r'^api/(?P<inst>({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, name='preview_images_by_instrument'),
100 re_path(r'^api/(?P<inst>({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'),
101 re_path(r'^api/(?P<proposal>[\d]{1,5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'),
102 re_path(r'^api/(?P<proposal>[\d]{1,5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'),
103 re_path(r'^api/(?P<proposal>[\d]{1,5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'),
104 re_path(r'^api/(?P<rootname>[\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'),
105 re_path(r'^api/(?P<rootname>[\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'),
106 re_path(r'^api/(?P<rootname>[\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'),
107 ]
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py
--- a/jwql/website/apps/jwql/urls.py
+++ b/jwql/website/apps/jwql/urls.py
@@ -81,7 +81,7 @@
re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),
re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),
re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),
- re_path(r'^(?P<inst>({}))/(?P<file_root>[\w]+)/$'.format(instruments), views.view_image, name='view_image'),
+ re_path(r'^(?P<inst>({}))/(?P<file_root>[\w-]+)/$'.format(instruments), views.view_image, name='view_image'),
re_path(r'^(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'),
re_path(r'^(?P<inst>({}))/(?P<filename>.+)_(?P<filetype>.+)/header/'.format(instruments), views.view_header, name='view_header'),
re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\d]{{1,5}})/obs(?P<observation>[\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'),
| {"golden_diff": "diff --git a/jwql/website/apps/jwql/urls.py b/jwql/website/apps/jwql/urls.py\n--- a/jwql/website/apps/jwql/urls.py\n+++ b/jwql/website/apps/jwql/urls.py\n@@ -81,7 +81,7 @@\n re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),\n re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),\n re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),\n- re_path(r'^(?P<inst>({}))/(?P<file_root>[\\w]+)/$'.format(instruments), views.view_image, name='view_image'),\n+ re_path(r'^(?P<inst>({}))/(?P<file_root>[\\w-]+)/$'.format(instruments), views.view_image, name='view_image'),\n re_path(r'^(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'),\n re_path(r'^(?P<inst>({}))/(?P<filename>.+)_(?P<filetype>.+)/header/'.format(instruments), views.view_header, name='view_header'),\n re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\\d]{{1,5}})/obs(?P<observation>[\\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'),\n", "issue": "Image view for segmented files is not working\ne.g. https://jwql.stsci.edu/NIRCam/jw01442001001_04103_00001-seg004_nrca3/\r\n\r\nThis is giving a \"Page not found\" error.\n", "before_files": [{"content": "\"\"\"Maps URL paths to views in the ``jwql`` app.\n\nThis module connects requested URL paths to the corresponding view in\n``views.py`` for each webpage in the ``jwql`` app. When django is\nprovided a path, it searches through the ``urlpatterns`` list provided\nhere until it finds one that matches. It then calls the assigned view\nto load the appropriate webpage, passing an ``HttpRequest`` object.\n\nAuthors\n-------\n\n - Lauren Chambers\n - Matthew Bourque\n - Johannes Sahlmann\n - Teagan King\n\nUse\n---\n\n Function views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\n Class-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\n Including another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\nReferences\n----------\n For more information please see:\n ``https://docs.djangoproject.com/en/2.0/topics/http/urls/``\n\nNotes\n-----\n Be aware that when a url is requested, it will be directed to the\n first matching path in the ``urlpatterns`` list that it finds. The\n ``<str:var>`` tag is just a placeholder. To avoid complications,\n users should order their paths in order from shortest to longest,\n and after that from most to least specific.\n\"\"\"\n\nfrom django.urls import path\nfrom django.urls import re_path\n\nfrom . import api_views\nfrom . import monitor_views\nfrom . 
import views\n\napp_name = 'jwql'\ninstruments = 'nircam|NIRCam|niriss|NIRISS|nirspec|NIRSpec|miri|MIRI|fgs|FGS'\n\nurlpatterns = [\n\n # Home\n path('', views.home, name='home'),\n\n # MIRI-specific views\n path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'),\n\n # NIRSpec-specific views\n path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'),\n\n # Common monitor views\n re_path(r'^(?P<inst>({}))/dark_monitor/$'.format(instruments), monitor_views.dark_monitor, name='dark_monitor'),\n re_path(r'^(?P<inst>({}))/bad_pixel_monitor/$'.format(instruments), monitor_views.bad_pixel_monitor, name='bad_pixel_monitor'),\n re_path(r'^(?P<inst>({}))/bias_monitor/$'.format(instruments), monitor_views.bias_monitor, name='bias_monitor'),\n re_path(r'^(?P<inst>({}))/readnoise_monitor/$'.format(instruments), monitor_views.readnoise_monitor, name='readnoise_monitor'),\n\n # Main site views\n path('about/', views.about, name='about'),\n path('anomaly_query/', views.anomaly_query, name='anomaly_query'),\n path('api/', views.api_landing, name='api'),\n path('dashboard/', views.dashboard, name='dashboard'),\n path('download_table/<str:tablename>', views.export, name='download_table'),\n path('edb/', views.engineering_database, name='edb'),\n path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb'),\n path('jwqldb/<str:tablename_param>', views.jwqldb_table_viewer, name='jwqldb_table_viewer'),\n path('query_submit/', views.query_submit, name='query_submit'),\n re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),\n re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),\n re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),\n re_path(r'^(?P<inst>({}))/(?P<file_root>[\\w]+)/$'.format(instruments), views.view_image, name='view_image'),\n re_path(r'^(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'),\n re_path(r'^(?P<inst>({}))/(?P<filename>.+)_(?P<filetype>.+)/header/'.format(instruments), views.view_header, name='view_header'),\n re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\\d]{{1,5}})/obs(?P<observation>[\\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'),\n\n # AJAX views\n re_path('ajax/query_submit/', views.archive_thumbnails_query_ajax, name='archive_thumb_query_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals_ajax, name='archive_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image_(?P<scaling>.+)_(?P<low_lim>.+)_(?P<high_lim>.+)/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/archive/(?P<proposal>[\\d]{{1,5}})/obs(?P<observation>[\\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'),\n\n # REST API views\n path('api/proposals/', api_views.all_proposals, name='all_proposals'),\n re_path(r'^api/(?P<inst>({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'),\n re_path(r'^api/(?P<inst>({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, 
name='preview_images_by_instrument'),\n re_path(r'^api/(?P<inst>({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'),\n re_path(r'^api/(?P<rootname>[\\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'),\n re_path(r'^api/(?P<rootname>[\\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'),\n re_path(r'^api/(?P<rootname>[\\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'),\n]\n", "path": "jwql/website/apps/jwql/urls.py"}], "after_files": [{"content": "\"\"\"Maps URL paths to views in the ``jwql`` app.\n\nThis module connects requested URL paths to the corresponding view in\n``views.py`` for each webpage in the ``jwql`` app. When django is\nprovided a path, it searches through the ``urlpatterns`` list provided\nhere until it finds one that matches. It then calls the assigned view\nto load the appropriate webpage, passing an ``HttpRequest`` object.\n\nAuthors\n-------\n\n - Lauren Chambers\n - Matthew Bourque\n - Johannes Sahlmann\n - Teagan King\n\nUse\n---\n\n Function views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\n Class-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\n Including another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\nReferences\n----------\n For more information please see:\n ``https://docs.djangoproject.com/en/2.0/topics/http/urls/``\n\nNotes\n-----\n Be aware that when a url is requested, it will be directed to the\n first matching path in the ``urlpatterns`` list that it finds. The\n ``<str:var>`` tag is just a placeholder. To avoid complications,\n users should order their paths in order from shortest to longest,\n and after that from most to least specific.\n\"\"\"\n\nfrom django.urls import path\nfrom django.urls import re_path\n\nfrom . import api_views\nfrom . import monitor_views\nfrom . 
import views\n\napp_name = 'jwql'\ninstruments = 'nircam|NIRCam|niriss|NIRISS|nirspec|NIRSpec|miri|MIRI|fgs|FGS'\n\nurlpatterns = [\n\n # Home\n path('', views.home, name='home'),\n\n # MIRI-specific views\n path('miri/miri_data_trending/', views.miri_data_trending, name='miri_data_trending'),\n\n # NIRSpec-specific views\n path('nirspec/nirspec_data_trending/', views.nirspec_data_trending, name='nirspec_data_trending'),\n\n # Common monitor views\n re_path(r'^(?P<inst>({}))/dark_monitor/$'.format(instruments), monitor_views.dark_monitor, name='dark_monitor'),\n re_path(r'^(?P<inst>({}))/bad_pixel_monitor/$'.format(instruments), monitor_views.bad_pixel_monitor, name='bad_pixel_monitor'),\n re_path(r'^(?P<inst>({}))/bias_monitor/$'.format(instruments), monitor_views.bias_monitor, name='bias_monitor'),\n re_path(r'^(?P<inst>({}))/readnoise_monitor/$'.format(instruments), monitor_views.readnoise_monitor, name='readnoise_monitor'),\n\n # Main site views\n path('about/', views.about, name='about'),\n path('anomaly_query/', views.anomaly_query, name='anomaly_query'),\n path('api/', views.api_landing, name='api'),\n path('dashboard/', views.dashboard, name='dashboard'),\n path('download_table/<str:tablename>', views.export, name='download_table'),\n path('edb/', views.engineering_database, name='edb'),\n path('jwqldb/', views.jwqldb_table_viewer, name='jwqldb'),\n path('jwqldb/<str:tablename_param>', views.jwqldb_table_viewer, name='jwqldb_table_viewer'),\n path('query_submit/', views.query_submit, name='query_submit'),\n re_path(r'^(?P<inst>({}))/$'.format(instruments), views.instrument, name='instrument'),\n re_path(r'^(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals, name='archive'),\n re_path(r'^(?P<inst>({}))/unlooked/$'.format(instruments), views.unlooked_images, name='unlooked'),\n re_path(r'^(?P<inst>({}))/(?P<file_root>[\\w-]+)/$'.format(instruments), views.view_image, name='view_image'),\n re_path(r'^(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/'.format(instruments), views.explore_image, name='explore_image'),\n re_path(r'^(?P<inst>({}))/(?P<filename>.+)_(?P<filetype>.+)/header/'.format(instruments), views.view_header, name='view_header'),\n re_path(r'^(?P<inst>({}))/archive/(?P<proposal>[\\d]{{1,5}})/obs(?P<observation>[\\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_per_observation, name='archive_thumb_per_obs'),\n\n # AJAX views\n re_path('ajax/query_submit/', views.archive_thumbnails_query_ajax, name='archive_thumb_query_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/archive/$'.format(instruments), views.archived_proposals_ajax, name='archive_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/(?P<file_root>.+)_(?P<filetype>.+)/explore_image_(?P<scaling>.+)_(?P<low_lim>.+)_(?P<high_lim>.+)/$'.format(instruments), views.explore_image_ajax, name='explore_image_ajax'),\n re_path(r'^ajax/(?P<inst>({}))/archive/(?P<proposal>[\\d]{{1,5}})/obs(?P<observation>[\\d]{{1,3}})/$'.format(instruments), views.archive_thumbnails_ajax, name='archive_thumb_ajax'),\n\n # REST API views\n path('api/proposals/', api_views.all_proposals, name='all_proposals'),\n re_path(r'^api/(?P<inst>({}))/proposals/$'.format(instruments), api_views.instrument_proposals, name='instrument_proposals'),\n re_path(r'^api/(?P<inst>({}))/preview_images/$'.format(instruments), api_views.preview_images_by_instrument, 
name='preview_images_by_instrument'),\n re_path(r'^api/(?P<inst>({}))/thumbnails/$'.format(instruments), api_views.thumbnails_by_instrument, name='thumbnails_by_instrument'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/filenames/$', api_views.filenames_by_proposal, name='filenames_by_proposal'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/preview_images/$', api_views.preview_images_by_proposal, name='preview_images_by_proposal'),\n re_path(r'^api/(?P<proposal>[\\d]{1,5})/thumbnails/$', api_views.thumbnails_by_proposal, name='preview_images_by_proposal'),\n re_path(r'^api/(?P<rootname>[\\w]+)/filenames/$', api_views.filenames_by_rootname, name='filenames_by_rootname'),\n re_path(r'^api/(?P<rootname>[\\w]+)/preview_images/$', api_views.preview_images_by_rootname, name='preview_images_by_rootname'),\n re_path(r'^api/(?P<rootname>[\\w]+)/thumbnails/$', api_views.thumbnails_by_rootname, name='thumbnails_by_rootname'),\n]\n", "path": "jwql/website/apps/jwql/urls.py"}]} | 2,250 | 392 |
gh_patches_debug_6958 | rasdani/github-patches | git_diff | mozilla__telemetry-analysis-service-1493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Email server shutdown warning again after extending cluster
Hey, would it be possible to send another hour warning email if a user extends the cluster life?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `atmo/clusters/models.py`
Content:
```
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
4 import math
5 from datetime import timedelta
6
7 from autorepr import autorepr, autostr
8 from django.db import models, transaction
9 from django.utils import timezone
10
11 from ..models import CreatedByModel, EditedAtModel, URLActionModel
12 from .provisioners import ClusterProvisioner
13 from .queries import ClusterQuerySet, EMRReleaseQuerySet
14 from atmo.stats.models import Metric
15
16
17 class EMRRelease(EditedAtModel):
18 version = models.CharField(
19 max_length=50,
20 primary_key=True,
21 )
22 changelog_url = models.TextField(
23 help_text='The URL of the changelog with details about the release.',
24 default='',
25 )
26 help_text = models.TextField(
27 help_text='Optional help text to show for users when creating a cluster.',
28 default='',
29 )
30 is_active = models.BooleanField(
31 help_text='Whether this version should be shown to the user at all.',
32 default=True,
33 )
34 is_experimental = models.BooleanField(
35 help_text='Whether this version should be shown to users as experimental.',
36 default=False,
37 )
38 is_deprecated = models.BooleanField(
39 help_text='Whether this version should be shown to users as deprecated.',
40 default=False,
41 )
42
43 objects = EMRReleaseQuerySet.as_manager()
44
45 class Meta:
46 ordering = ['-version']
47 get_latest_by = 'created_at'
48 verbose_name = 'EMR release'
49 verbose_name_plural = 'EMR releases'
50
51 __str__ = autostr('{self.version}')
52
53 __repr__ = autorepr(['version', 'is_active', 'is_experimental', 'is_deprecated'])
54
55
56 class EMRReleaseModel(models.Model):
57 EMR_RELEASE_HELP = (
58 'Different AWS EMR versions have different versions '
59 'of software like Hadoop, Spark, etc. '
60 'See <a href="'
61 'http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html"'
62 '>what\'s new</a> in each.'
63 )
64 emr_release = models.ForeignKey(
65 EMRRelease,
66 verbose_name='EMR release',
67 on_delete=models.PROTECT,
68 related_name='created_%(class)ss', # e.g. emr_release.created_clusters.all()
69 help_text=EMR_RELEASE_HELP,
70 )
71
72 class Meta:
73 abstract = True
74
75
76 class Cluster(EMRReleaseModel, CreatedByModel, EditedAtModel, URLActionModel):
77 STATUS_STARTING = 'STARTING'
78 STATUS_BOOTSTRAPPING = 'BOOTSTRAPPING'
79 STATUS_RUNNING = 'RUNNING'
80 STATUS_WAITING = 'WAITING'
81 STATUS_TERMINATING = 'TERMINATING'
82 STATUS_TERMINATED = 'TERMINATED'
83 STATUS_TERMINATED_WITH_ERRORS = 'TERMINATED_WITH_ERRORS'
84
85 ACTIVE_STATUS_LIST = (
86 STATUS_STARTING,
87 STATUS_BOOTSTRAPPING,
88 STATUS_RUNNING,
89 STATUS_WAITING,
90 STATUS_TERMINATING,
91 )
92 READY_STATUS_LIST = [
93 STATUS_RUNNING,
94 STATUS_WAITING,
95 ]
96 TERMINATED_STATUS_LIST = (
97 STATUS_TERMINATED,
98 )
99 FAILED_STATUS_LIST = (
100 STATUS_TERMINATED_WITH_ERRORS,
101 )
102 FINAL_STATUS_LIST = TERMINATED_STATUS_LIST + FAILED_STATUS_LIST
103
104 STATE_CHANGE_REASON_INTERNAL_ERROR = 'INTERNAL_ERROR'
105 STATE_CHANGE_REASON_VALIDATION_ERROR = 'VALIDATION_ERROR'
106 STATE_CHANGE_REASON_INSTANCE_FAILURE = 'INSTANCE_FAILURE'
107 STATE_CHANGE_REASON_BOOTSTRAP_FAILURE = 'BOOTSTRAP_FAILURE'
108 STATE_CHANGE_REASON_USER_REQUEST = 'USER_REQUEST'
109 STATE_CHANGE_REASON_STEP_FAILURE = 'STEP_FAILURE'
110 STATE_CHANGE_REASON_ALL_STEPS_COMPLETED = 'ALL_STEPS_COMPLETED'
111 FAILED_STATE_CHANGE_REASON_LIST = [
112 STATE_CHANGE_REASON_INTERNAL_ERROR,
113 STATE_CHANGE_REASON_VALIDATION_ERROR,
114 STATE_CHANGE_REASON_INSTANCE_FAILURE,
115 STATE_CHANGE_REASON_BOOTSTRAP_FAILURE,
116 STATE_CHANGE_REASON_STEP_FAILURE,
117 ]
118 REQUESTED_STATE_CHANGE_REASON_LIST = [
119 STATE_CHANGE_REASON_USER_REQUEST,
120 ]
121 COMPLETED_STATE_CHANGE_REASON_LIST = [
122 STATE_CHANGE_REASON_ALL_STEPS_COMPLETED,
123 ]
124 DEFAULT_SIZE = 1
125 DEFAULT_LIFETIME = 8
126
127 identifier = models.CharField(
128 max_length=100,
129 help_text="Cluster name, used to non-uniqely identify individual clusters."
130 )
131 size = models.IntegerField(
132 help_text="Number of computers used in the cluster."
133 )
134 lifetime = models.PositiveSmallIntegerField(
135 help_text="Lifetime of the cluster after which it's automatically terminated, in hours.",
136 default=DEFAULT_LIFETIME,
137 )
138 lifetime_extension_count = models.PositiveSmallIntegerField(
139 help_text="Number of lifetime extensions.",
140 default=0,
141 )
142 ssh_key = models.ForeignKey(
143 'keys.SSHKey',
144 on_delete=models.SET_NULL,
145 blank=True,
146 null=True,
147 related_name='launched_clusters', # e.g. ssh_key.launched_clusters.all()
148 help_text="SSH key to use when launching the cluster.",
149 )
150 expires_at = models.DateTimeField(
151 blank=True,
152 null=True,
153 help_text="Date/time that the cluster will expire and automatically be deleted.",
154 )
155 started_at = models.DateTimeField(
156 blank=True,
157 null=True,
158 help_text="Date/time when the cluster was started on AWS EMR.",
159 )
160 ready_at = models.DateTimeField(
161 blank=True,
162 null=True,
163 help_text="Date/time when the cluster was ready to run steps on AWS EMR.",
164 )
165 finished_at = models.DateTimeField(
166 blank=True,
167 null=True,
168 help_text="Date/time when the cluster was terminated or failed on AWS EMR.",
169 )
170 jobflow_id = models.CharField(
171 max_length=50,
172 blank=True,
173 null=True,
174 help_text="AWS cluster/jobflow ID for the cluster, used for cluster management.",
175 )
176 most_recent_status = models.CharField(
177 max_length=50,
178 default='',
179 blank=True,
180 help_text="Most recently retrieved AWS status for the cluster.",
181 db_index=True,
182 )
183 master_address = models.CharField(
184 max_length=255,
185 default='',
186 blank=True,
187 help_text=("Public address of the master node."
188 "This is only available once the cluster has bootstrapped"),
189 )
190 expiration_mail_sent = models.BooleanField(
191 default=False,
192 help_text="Whether the expiration mail were sent.",
193 )
194
195 objects = ClusterQuerySet.as_manager()
196
197 class Meta:
198 permissions = [
199 ('view_cluster', 'Can view cluster'),
200 ('maintain_cluster', 'Can maintain cluster'),
201 ]
202
203 __str__ = autostr('{self.identifier}')
204
205 __repr__ = autorepr([
206 'identifier',
207 'most_recent_status',
208 'size',
209 'lifetime',
210 'expires_at',
211 'lifetime_extension_count',
212 ])
213
214 url_prefix = 'clusters'
215 url_actions = ['detail', 'extend', 'terminate']
216
217 def get_absolute_url(self):
218 return self.urls.detail
219
220 @property
221 def is_active(self):
222 """Returns whether the cluster is active or not."""
223 return self.most_recent_status in self.ACTIVE_STATUS_LIST
224
225 @property
226 def is_terminated(self):
227 """Returns whether the cluster is terminated or not."""
228 return self.most_recent_status in self.TERMINATED_STATUS_LIST
229
230 @property
231 def is_failed(self):
232 """Returns whether the cluster has failed or not."""
233 return self.most_recent_status in self.FAILED_STATUS_LIST
234
235 @property
236 def is_terminating(self):
237 """Returns whether the cluster is terminating or not."""
238 return self.most_recent_status == self.STATUS_TERMINATING
239
240 @property
241 def is_ready(self):
242 """Returns whether the cluster is ready or not."""
243 return self.most_recent_status == self.STATUS_WAITING
244
245 @property
246 def is_expiring_soon(self):
247 """Returns whether the cluster is expiring in the next hour."""
248 return self.expires_at <= timezone.now() + timedelta(hours=1)
249
250 @property
251 def provisioner(self):
252 return ClusterProvisioner()
253
254 @property
255 def info(self):
256 """Returns the provisioning information for the cluster."""
257 return self.provisioner.info(self.jobflow_id)
258
259 def sync(self, info=None):
260 """Should be called to update latest cluster status in `self.most_recent_status`."""
261 if info is None:
262 info = self.info
263
264 # Map AWS API fields to Cluster model fields.
265 model_field_map = (
266 ('state', 'most_recent_status'),
267 ('public_dns', 'master_address'),
268 ('creation_datetime', 'started_at'),
269 ('ready_datetime', 'ready_at'),
270 ('end_datetime', 'finished_at'),
271 )
272 save_needed = False
273 date_fields_updated = False
274
275 # set the various model fields to the value the API returned
276 for api_field, model_field in model_field_map:
277 field_value = info.get(api_field)
278 # Only update the field if the value for a field is not set or it
279 # hasn't changed.
280 if field_value is None or field_value == getattr(self, model_field):
281 continue
282 setattr(self, model_field, field_value)
283 save_needed = True
284
285 if model_field in ('started_at', 'ready_at', 'finished_at'):
286 date_fields_updated = True
287
288 if save_needed:
289 with transaction.atomic():
290 self.save()
291
292 with transaction.atomic():
293 if date_fields_updated:
294
295 if self.finished_at:
296 # When cluster is finished, record normalized instance
297 # hours.
298 hours = math.ceil(
299 (self.finished_at - self.started_at).seconds / 60 / 60
300 )
301 normalized_hours = hours * self.size
302 Metric.record(
303 'cluster-normalized-instance-hours', normalized_hours,
304 data={
305 'identifier': self.identifier,
306 'size': self.size,
307 'jobflow_id': self.jobflow_id,
308 }
309 )
310
311 # When cluster is ready, record a count and time to ready.
312 if self.ready_at and not self.finished_at:
313 # A simple count to track number of clusters spun up
314 # successfully.
315 Metric.record('cluster-ready', data={
316 'identifier': self.identifier,
317 'size': self.size,
318 'jobflow_id': self.jobflow_id,
319 })
320 # Time in seconds it took the cluster to be ready.
321 time_to_ready = (self.ready_at - self.started_at).seconds
322 Metric.record(
323 'cluster-time-to-ready', time_to_ready,
324 data={
325 'identifier': self.identifier,
326 'size': self.size,
327 'jobflow_id': self.jobflow_id,
328 }
329 )
330
331 def save(self, *args, **kwargs):
332 """Insert the cluster into the database or update it if already
333 present, spawning the cluster if it's not already spawned.
334 """
335 # actually start the cluster
336 if self.jobflow_id is None:
337 self.jobflow_id = self.provisioner.start(
338 user_username=self.created_by.username,
339 user_email=self.created_by.email,
340 identifier=self.identifier,
341 emr_release=self.emr_release.version,
342 size=self.size,
343 public_key=self.ssh_key.key,
344 )
345 # once we've stored the jobflow id we can fetch the status for the first time
346 transaction.on_commit(self.sync)
347
348 with transaction.atomic():
349 Metric.record('cluster-emr-version',
350 data={'version': self.emr_release.version})
351
352 # set the dates
353 if not self.expires_at:
354 # clusters should expire after the lifetime it's set to
355 self.expires_at = timezone.now() + timedelta(hours=self.lifetime)
356
357 super().save(*args, **kwargs)
358
359 def extend(self, hours):
360 """Extend the cluster lifetime by the given number of hours."""
361 self.expires_at = models.F('expires_at') + timedelta(hours=hours)
362 self.lifetime_extension_count = models.F('lifetime_extension_count') + 1
363 self.save()
364
365 with transaction.atomic():
366 Metric.record('cluster-extension', data={
367 'identifier': self.identifier,
368 'size': self.size,
369 'jobflow_id': self.jobflow_id,
370 })
371
372 def deactivate(self):
373 """Shutdown the cluster and update its status accordingly"""
374 self.provisioner.stop(self.jobflow_id)
375 self.sync()
376
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/atmo/clusters/models.py b/atmo/clusters/models.py
--- a/atmo/clusters/models.py
+++ b/atmo/clusters/models.py
@@ -360,6 +360,7 @@
"""Extend the cluster lifetime by the given number of hours."""
self.expires_at = models.F('expires_at') + timedelta(hours=hours)
self.lifetime_extension_count = models.F('lifetime_extension_count') + 1
+ self.expiration_mail_sent = False
self.save()
with transaction.atomic():
| {"golden_diff": "diff --git a/atmo/clusters/models.py b/atmo/clusters/models.py\n--- a/atmo/clusters/models.py\n+++ b/atmo/clusters/models.py\n@@ -360,6 +360,7 @@\n \"\"\"Extend the cluster lifetime by the given number of hours.\"\"\"\n self.expires_at = models.F('expires_at') + timedelta(hours=hours)\n self.lifetime_extension_count = models.F('lifetime_extension_count') + 1\n+ self.expiration_mail_sent = False\n self.save()\n \n with transaction.atomic():\n", "issue": "Email server shutdown warning again after extending cluster\nHey, would it be possible to send another hour warning email if a user extends the cluster life?\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nimport math\nfrom datetime import timedelta\n\nfrom autorepr import autorepr, autostr\nfrom django.db import models, transaction\nfrom django.utils import timezone\n\nfrom ..models import CreatedByModel, EditedAtModel, URLActionModel\nfrom .provisioners import ClusterProvisioner\nfrom .queries import ClusterQuerySet, EMRReleaseQuerySet\nfrom atmo.stats.models import Metric\n\n\nclass EMRRelease(EditedAtModel):\n version = models.CharField(\n max_length=50,\n primary_key=True,\n )\n changelog_url = models.TextField(\n help_text='The URL of the changelog with details about the release.',\n default='',\n )\n help_text = models.TextField(\n help_text='Optional help text to show for users when creating a cluster.',\n default='',\n )\n is_active = models.BooleanField(\n help_text='Whether this version should be shown to the user at all.',\n default=True,\n )\n is_experimental = models.BooleanField(\n help_text='Whether this version should be shown to users as experimental.',\n default=False,\n )\n is_deprecated = models.BooleanField(\n help_text='Whether this version should be shown to users as deprecated.',\n default=False,\n )\n\n objects = EMRReleaseQuerySet.as_manager()\n\n class Meta:\n ordering = ['-version']\n get_latest_by = 'created_at'\n verbose_name = 'EMR release'\n verbose_name_plural = 'EMR releases'\n\n __str__ = autostr('{self.version}')\n\n __repr__ = autorepr(['version', 'is_active', 'is_experimental', 'is_deprecated'])\n\n\nclass EMRReleaseModel(models.Model):\n EMR_RELEASE_HELP = (\n 'Different AWS EMR versions have different versions '\n 'of software like Hadoop, Spark, etc. '\n 'See <a href=\"'\n 'http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html\"'\n '>what\\'s new</a> in each.'\n )\n emr_release = models.ForeignKey(\n EMRRelease,\n verbose_name='EMR release',\n on_delete=models.PROTECT,\n related_name='created_%(class)ss', # e.g. 
emr_release.created_clusters.all()\n help_text=EMR_RELEASE_HELP,\n )\n\n class Meta:\n abstract = True\n\n\nclass Cluster(EMRReleaseModel, CreatedByModel, EditedAtModel, URLActionModel):\n STATUS_STARTING = 'STARTING'\n STATUS_BOOTSTRAPPING = 'BOOTSTRAPPING'\n STATUS_RUNNING = 'RUNNING'\n STATUS_WAITING = 'WAITING'\n STATUS_TERMINATING = 'TERMINATING'\n STATUS_TERMINATED = 'TERMINATED'\n STATUS_TERMINATED_WITH_ERRORS = 'TERMINATED_WITH_ERRORS'\n\n ACTIVE_STATUS_LIST = (\n STATUS_STARTING,\n STATUS_BOOTSTRAPPING,\n STATUS_RUNNING,\n STATUS_WAITING,\n STATUS_TERMINATING,\n )\n READY_STATUS_LIST = [\n STATUS_RUNNING,\n STATUS_WAITING,\n ]\n TERMINATED_STATUS_LIST = (\n STATUS_TERMINATED,\n )\n FAILED_STATUS_LIST = (\n STATUS_TERMINATED_WITH_ERRORS,\n )\n FINAL_STATUS_LIST = TERMINATED_STATUS_LIST + FAILED_STATUS_LIST\n\n STATE_CHANGE_REASON_INTERNAL_ERROR = 'INTERNAL_ERROR'\n STATE_CHANGE_REASON_VALIDATION_ERROR = 'VALIDATION_ERROR'\n STATE_CHANGE_REASON_INSTANCE_FAILURE = 'INSTANCE_FAILURE'\n STATE_CHANGE_REASON_BOOTSTRAP_FAILURE = 'BOOTSTRAP_FAILURE'\n STATE_CHANGE_REASON_USER_REQUEST = 'USER_REQUEST'\n STATE_CHANGE_REASON_STEP_FAILURE = 'STEP_FAILURE'\n STATE_CHANGE_REASON_ALL_STEPS_COMPLETED = 'ALL_STEPS_COMPLETED'\n FAILED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_INTERNAL_ERROR,\n STATE_CHANGE_REASON_VALIDATION_ERROR,\n STATE_CHANGE_REASON_INSTANCE_FAILURE,\n STATE_CHANGE_REASON_BOOTSTRAP_FAILURE,\n STATE_CHANGE_REASON_STEP_FAILURE,\n ]\n REQUESTED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_USER_REQUEST,\n ]\n COMPLETED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_ALL_STEPS_COMPLETED,\n ]\n DEFAULT_SIZE = 1\n DEFAULT_LIFETIME = 8\n\n identifier = models.CharField(\n max_length=100,\n help_text=\"Cluster name, used to non-uniqely identify individual clusters.\"\n )\n size = models.IntegerField(\n help_text=\"Number of computers used in the cluster.\"\n )\n lifetime = models.PositiveSmallIntegerField(\n help_text=\"Lifetime of the cluster after which it's automatically terminated, in hours.\",\n default=DEFAULT_LIFETIME,\n )\n lifetime_extension_count = models.PositiveSmallIntegerField(\n help_text=\"Number of lifetime extensions.\",\n default=0,\n )\n ssh_key = models.ForeignKey(\n 'keys.SSHKey',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='launched_clusters', # e.g. 
ssh_key.launched_clusters.all()\n help_text=\"SSH key to use when launching the cluster.\",\n )\n expires_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the cluster will expire and automatically be deleted.\",\n )\n started_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was started on AWS EMR.\",\n )\n ready_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was ready to run steps on AWS EMR.\",\n )\n finished_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was terminated or failed on AWS EMR.\",\n )\n jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n help_text=\"AWS cluster/jobflow ID for the cluster, used for cluster management.\",\n )\n most_recent_status = models.CharField(\n max_length=50,\n default='',\n blank=True,\n help_text=\"Most recently retrieved AWS status for the cluster.\",\n db_index=True,\n )\n master_address = models.CharField(\n max_length=255,\n default='',\n blank=True,\n help_text=(\"Public address of the master node.\"\n \"This is only available once the cluster has bootstrapped\"),\n )\n expiration_mail_sent = models.BooleanField(\n default=False,\n help_text=\"Whether the expiration mail were sent.\",\n )\n\n objects = ClusterQuerySet.as_manager()\n\n class Meta:\n permissions = [\n ('view_cluster', 'Can view cluster'),\n ('maintain_cluster', 'Can maintain cluster'),\n ]\n\n __str__ = autostr('{self.identifier}')\n\n __repr__ = autorepr([\n 'identifier',\n 'most_recent_status',\n 'size',\n 'lifetime',\n 'expires_at',\n 'lifetime_extension_count',\n ])\n\n url_prefix = 'clusters'\n url_actions = ['detail', 'extend', 'terminate']\n\n def get_absolute_url(self):\n return self.urls.detail\n\n @property\n def is_active(self):\n \"\"\"Returns whether the cluster is active or not.\"\"\"\n return self.most_recent_status in self.ACTIVE_STATUS_LIST\n\n @property\n def is_terminated(self):\n \"\"\"Returns whether the cluster is terminated or not.\"\"\"\n return self.most_recent_status in self.TERMINATED_STATUS_LIST\n\n @property\n def is_failed(self):\n \"\"\"Returns whether the cluster has failed or not.\"\"\"\n return self.most_recent_status in self.FAILED_STATUS_LIST\n\n @property\n def is_terminating(self):\n \"\"\"Returns whether the cluster is terminating or not.\"\"\"\n return self.most_recent_status == self.STATUS_TERMINATING\n\n @property\n def is_ready(self):\n \"\"\"Returns whether the cluster is ready or not.\"\"\"\n return self.most_recent_status == self.STATUS_WAITING\n\n @property\n def is_expiring_soon(self):\n \"\"\"Returns whether the cluster is expiring in the next hour.\"\"\"\n return self.expires_at <= timezone.now() + timedelta(hours=1)\n\n @property\n def provisioner(self):\n return ClusterProvisioner()\n\n @property\n def info(self):\n \"\"\"Returns the provisioning information for the cluster.\"\"\"\n return self.provisioner.info(self.jobflow_id)\n\n def sync(self, info=None):\n \"\"\"Should be called to update latest cluster status in `self.most_recent_status`.\"\"\"\n if info is None:\n info = self.info\n\n # Map AWS API fields to Cluster model fields.\n model_field_map = (\n ('state', 'most_recent_status'),\n ('public_dns', 'master_address'),\n ('creation_datetime', 'started_at'),\n ('ready_datetime', 'ready_at'),\n ('end_datetime', 'finished_at'),\n )\n save_needed = False\n date_fields_updated = False\n\n # set the various model fields to the 
value the API returned\n for api_field, model_field in model_field_map:\n field_value = info.get(api_field)\n # Only update the field if the value for a field is not set or it\n # hasn't changed.\n if field_value is None or field_value == getattr(self, model_field):\n continue\n setattr(self, model_field, field_value)\n save_needed = True\n\n if model_field in ('started_at', 'ready_at', 'finished_at'):\n date_fields_updated = True\n\n if save_needed:\n with transaction.atomic():\n self.save()\n\n with transaction.atomic():\n if date_fields_updated:\n\n if self.finished_at:\n # When cluster is finished, record normalized instance\n # hours.\n hours = math.ceil(\n (self.finished_at - self.started_at).seconds / 60 / 60\n )\n normalized_hours = hours * self.size\n Metric.record(\n 'cluster-normalized-instance-hours', normalized_hours,\n data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n }\n )\n\n # When cluster is ready, record a count and time to ready.\n if self.ready_at and not self.finished_at:\n # A simple count to track number of clusters spun up\n # successfully.\n Metric.record('cluster-ready', data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n })\n # Time in seconds it took the cluster to be ready.\n time_to_ready = (self.ready_at - self.started_at).seconds\n Metric.record(\n 'cluster-time-to-ready', time_to_ready,\n data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n }\n )\n\n def save(self, *args, **kwargs):\n \"\"\"Insert the cluster into the database or update it if already\n present, spawning the cluster if it's not already spawned.\n \"\"\"\n # actually start the cluster\n if self.jobflow_id is None:\n self.jobflow_id = self.provisioner.start(\n user_username=self.created_by.username,\n user_email=self.created_by.email,\n identifier=self.identifier,\n emr_release=self.emr_release.version,\n size=self.size,\n public_key=self.ssh_key.key,\n )\n # once we've stored the jobflow id we can fetch the status for the first time\n transaction.on_commit(self.sync)\n\n with transaction.atomic():\n Metric.record('cluster-emr-version',\n data={'version': self.emr_release.version})\n\n # set the dates\n if not self.expires_at:\n # clusters should expire after the lifetime it's set to\n self.expires_at = timezone.now() + timedelta(hours=self.lifetime)\n\n super().save(*args, **kwargs)\n\n def extend(self, hours):\n \"\"\"Extend the cluster lifetime by the given number of hours.\"\"\"\n self.expires_at = models.F('expires_at') + timedelta(hours=hours)\n self.lifetime_extension_count = models.F('lifetime_extension_count') + 1\n self.save()\n\n with transaction.atomic():\n Metric.record('cluster-extension', data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n })\n\n def deactivate(self):\n \"\"\"Shutdown the cluster and update its status accordingly\"\"\"\n self.provisioner.stop(self.jobflow_id)\n self.sync()\n", "path": "atmo/clusters/models.py"}], "after_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nimport math\nfrom datetime import timedelta\n\nfrom autorepr import autorepr, autostr\nfrom django.db import models, transaction\nfrom django.utils import timezone\n\nfrom ..models import CreatedByModel, EditedAtModel, URLActionModel\nfrom .provisioners import ClusterProvisioner\nfrom .queries import ClusterQuerySet, EMRReleaseQuerySet\nfrom atmo.stats.models import Metric\n\n\nclass EMRRelease(EditedAtModel):\n version = models.CharField(\n max_length=50,\n primary_key=True,\n )\n changelog_url = models.TextField(\n help_text='The URL of the changelog with details about the release.',\n default='',\n )\n help_text = models.TextField(\n help_text='Optional help text to show for users when creating a cluster.',\n default='',\n )\n is_active = models.BooleanField(\n help_text='Whether this version should be shown to the user at all.',\n default=True,\n )\n is_experimental = models.BooleanField(\n help_text='Whether this version should be shown to users as experimental.',\n default=False,\n )\n is_deprecated = models.BooleanField(\n help_text='Whether this version should be shown to users as deprecated.',\n default=False,\n )\n\n objects = EMRReleaseQuerySet.as_manager()\n\n class Meta:\n ordering = ['-version']\n get_latest_by = 'created_at'\n verbose_name = 'EMR release'\n verbose_name_plural = 'EMR releases'\n\n __str__ = autostr('{self.version}')\n\n __repr__ = autorepr(['version', 'is_active', 'is_experimental', 'is_deprecated'])\n\n\nclass EMRReleaseModel(models.Model):\n EMR_RELEASE_HELP = (\n 'Different AWS EMR versions have different versions '\n 'of software like Hadoop, Spark, etc. '\n 'See <a href=\"'\n 'http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html\"'\n '>what\\'s new</a> in each.'\n )\n emr_release = models.ForeignKey(\n EMRRelease,\n verbose_name='EMR release',\n on_delete=models.PROTECT,\n related_name='created_%(class)ss', # e.g. 
emr_release.created_clusters.all()\n help_text=EMR_RELEASE_HELP,\n )\n\n class Meta:\n abstract = True\n\n\nclass Cluster(EMRReleaseModel, CreatedByModel, EditedAtModel, URLActionModel):\n STATUS_STARTING = 'STARTING'\n STATUS_BOOTSTRAPPING = 'BOOTSTRAPPING'\n STATUS_RUNNING = 'RUNNING'\n STATUS_WAITING = 'WAITING'\n STATUS_TERMINATING = 'TERMINATING'\n STATUS_TERMINATED = 'TERMINATED'\n STATUS_TERMINATED_WITH_ERRORS = 'TERMINATED_WITH_ERRORS'\n\n ACTIVE_STATUS_LIST = (\n STATUS_STARTING,\n STATUS_BOOTSTRAPPING,\n STATUS_RUNNING,\n STATUS_WAITING,\n STATUS_TERMINATING,\n )\n READY_STATUS_LIST = [\n STATUS_RUNNING,\n STATUS_WAITING,\n ]\n TERMINATED_STATUS_LIST = (\n STATUS_TERMINATED,\n )\n FAILED_STATUS_LIST = (\n STATUS_TERMINATED_WITH_ERRORS,\n )\n FINAL_STATUS_LIST = TERMINATED_STATUS_LIST + FAILED_STATUS_LIST\n\n STATE_CHANGE_REASON_INTERNAL_ERROR = 'INTERNAL_ERROR'\n STATE_CHANGE_REASON_VALIDATION_ERROR = 'VALIDATION_ERROR'\n STATE_CHANGE_REASON_INSTANCE_FAILURE = 'INSTANCE_FAILURE'\n STATE_CHANGE_REASON_BOOTSTRAP_FAILURE = 'BOOTSTRAP_FAILURE'\n STATE_CHANGE_REASON_USER_REQUEST = 'USER_REQUEST'\n STATE_CHANGE_REASON_STEP_FAILURE = 'STEP_FAILURE'\n STATE_CHANGE_REASON_ALL_STEPS_COMPLETED = 'ALL_STEPS_COMPLETED'\n FAILED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_INTERNAL_ERROR,\n STATE_CHANGE_REASON_VALIDATION_ERROR,\n STATE_CHANGE_REASON_INSTANCE_FAILURE,\n STATE_CHANGE_REASON_BOOTSTRAP_FAILURE,\n STATE_CHANGE_REASON_STEP_FAILURE,\n ]\n REQUESTED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_USER_REQUEST,\n ]\n COMPLETED_STATE_CHANGE_REASON_LIST = [\n STATE_CHANGE_REASON_ALL_STEPS_COMPLETED,\n ]\n DEFAULT_SIZE = 1\n DEFAULT_LIFETIME = 8\n\n identifier = models.CharField(\n max_length=100,\n help_text=\"Cluster name, used to non-uniqely identify individual clusters.\"\n )\n size = models.IntegerField(\n help_text=\"Number of computers used in the cluster.\"\n )\n lifetime = models.PositiveSmallIntegerField(\n help_text=\"Lifetime of the cluster after which it's automatically terminated, in hours.\",\n default=DEFAULT_LIFETIME,\n )\n lifetime_extension_count = models.PositiveSmallIntegerField(\n help_text=\"Number of lifetime extensions.\",\n default=0,\n )\n ssh_key = models.ForeignKey(\n 'keys.SSHKey',\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name='launched_clusters', # e.g. 
ssh_key.launched_clusters.all()\n help_text=\"SSH key to use when launching the cluster.\",\n )\n expires_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the cluster will expire and automatically be deleted.\",\n )\n started_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was started on AWS EMR.\",\n )\n ready_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was ready to run steps on AWS EMR.\",\n )\n finished_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time when the cluster was terminated or failed on AWS EMR.\",\n )\n jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n help_text=\"AWS cluster/jobflow ID for the cluster, used for cluster management.\",\n )\n most_recent_status = models.CharField(\n max_length=50,\n default='',\n blank=True,\n help_text=\"Most recently retrieved AWS status for the cluster.\",\n db_index=True,\n )\n master_address = models.CharField(\n max_length=255,\n default='',\n blank=True,\n help_text=(\"Public address of the master node.\"\n \"This is only available once the cluster has bootstrapped\"),\n )\n expiration_mail_sent = models.BooleanField(\n default=False,\n help_text=\"Whether the expiration mail were sent.\",\n )\n\n objects = ClusterQuerySet.as_manager()\n\n class Meta:\n permissions = [\n ('view_cluster', 'Can view cluster'),\n ('maintain_cluster', 'Can maintain cluster'),\n ]\n\n __str__ = autostr('{self.identifier}')\n\n __repr__ = autorepr([\n 'identifier',\n 'most_recent_status',\n 'size',\n 'lifetime',\n 'expires_at',\n 'lifetime_extension_count',\n ])\n\n url_prefix = 'clusters'\n url_actions = ['detail', 'extend', 'terminate']\n\n def get_absolute_url(self):\n return self.urls.detail\n\n @property\n def is_active(self):\n \"\"\"Returns whether the cluster is active or not.\"\"\"\n return self.most_recent_status in self.ACTIVE_STATUS_LIST\n\n @property\n def is_terminated(self):\n \"\"\"Returns whether the cluster is terminated or not.\"\"\"\n return self.most_recent_status in self.TERMINATED_STATUS_LIST\n\n @property\n def is_failed(self):\n \"\"\"Returns whether the cluster has failed or not.\"\"\"\n return self.most_recent_status in self.FAILED_STATUS_LIST\n\n @property\n def is_terminating(self):\n \"\"\"Returns whether the cluster is terminating or not.\"\"\"\n return self.most_recent_status == self.STATUS_TERMINATING\n\n @property\n def is_ready(self):\n \"\"\"Returns whether the cluster is ready or not.\"\"\"\n return self.most_recent_status == self.STATUS_WAITING\n\n @property\n def is_expiring_soon(self):\n \"\"\"Returns whether the cluster is expiring in the next hour.\"\"\"\n return self.expires_at <= timezone.now() + timedelta(hours=1)\n\n @property\n def provisioner(self):\n return ClusterProvisioner()\n\n @property\n def info(self):\n \"\"\"Returns the provisioning information for the cluster.\"\"\"\n return self.provisioner.info(self.jobflow_id)\n\n def sync(self, info=None):\n \"\"\"Should be called to update latest cluster status in `self.most_recent_status`.\"\"\"\n if info is None:\n info = self.info\n\n # Map AWS API fields to Cluster model fields.\n model_field_map = (\n ('state', 'most_recent_status'),\n ('public_dns', 'master_address'),\n ('creation_datetime', 'started_at'),\n ('ready_datetime', 'ready_at'),\n ('end_datetime', 'finished_at'),\n )\n save_needed = False\n date_fields_updated = False\n\n # set the various model fields to the 
value the API returned\n for api_field, model_field in model_field_map:\n field_value = info.get(api_field)\n # Only update the field if the value for a field is not set or it\n # hasn't changed.\n if field_value is None or field_value == getattr(self, model_field):\n continue\n setattr(self, model_field, field_value)\n save_needed = True\n\n if model_field in ('started_at', 'ready_at', 'finished_at'):\n date_fields_updated = True\n\n if save_needed:\n with transaction.atomic():\n self.save()\n\n with transaction.atomic():\n if date_fields_updated:\n\n if self.finished_at:\n # When cluster is finished, record normalized instance\n # hours.\n hours = math.ceil(\n (self.finished_at - self.started_at).seconds / 60 / 60\n )\n normalized_hours = hours * self.size\n Metric.record(\n 'cluster-normalized-instance-hours', normalized_hours,\n data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n }\n )\n\n # When cluster is ready, record a count and time to ready.\n if self.ready_at and not self.finished_at:\n # A simple count to track number of clusters spun up\n # successfully.\n Metric.record('cluster-ready', data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n })\n # Time in seconds it took the cluster to be ready.\n time_to_ready = (self.ready_at - self.started_at).seconds\n Metric.record(\n 'cluster-time-to-ready', time_to_ready,\n data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n }\n )\n\n def save(self, *args, **kwargs):\n \"\"\"Insert the cluster into the database or update it if already\n present, spawning the cluster if it's not already spawned.\n \"\"\"\n # actually start the cluster\n if self.jobflow_id is None:\n self.jobflow_id = self.provisioner.start(\n user_username=self.created_by.username,\n user_email=self.created_by.email,\n identifier=self.identifier,\n emr_release=self.emr_release.version,\n size=self.size,\n public_key=self.ssh_key.key,\n )\n # once we've stored the jobflow id we can fetch the status for the first time\n transaction.on_commit(self.sync)\n\n with transaction.atomic():\n Metric.record('cluster-emr-version',\n data={'version': self.emr_release.version})\n\n # set the dates\n if not self.expires_at:\n # clusters should expire after the lifetime it's set to\n self.expires_at = timezone.now() + timedelta(hours=self.lifetime)\n\n super().save(*args, **kwargs)\n\n def extend(self, hours):\n \"\"\"Extend the cluster lifetime by the given number of hours.\"\"\"\n self.expires_at = models.F('expires_at') + timedelta(hours=hours)\n self.lifetime_extension_count = models.F('lifetime_extension_count') + 1\n self.expiration_mail_sent = False\n self.save()\n\n with transaction.atomic():\n Metric.record('cluster-extension', data={\n 'identifier': self.identifier,\n 'size': self.size,\n 'jobflow_id': self.jobflow_id,\n })\n\n def deactivate(self):\n \"\"\"Shutdown the cluster and update its status accordingly\"\"\"\n self.provisioner.stop(self.jobflow_id)\n self.sync()\n", "path": "atmo/clusters/models.py"}]} | 4,013 | 118 |
gh_patches_debug_26118 | rasdani/github-patches | git_diff | pymedusa__Medusa-1370 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: cafile must be None or a byte string?
### Before submitting your issue:
Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)
Branch/Commit: Latest (Saturday 29th Oct)
OS: Debian Linux (FeralHosting Seedbox)
What you did: Updated to the latest commit
What happened: Medusa won't run - spits out this log (below)
What you expected: For it to run
Logs:
```
python ~/.sickrage/SickBeard.py -d -p 10101
Traceback (most recent call last):
File "/media/sdq1/home/whipcracker/.sickrage/SickBeard.py", line 9, in <module>
from start import Application
File "/media/sdq1/home/whipcracker/.sickrage/start.py", line 104, in <module>
from medusa.server.core import AppWebServer
File "/media/sdq1/home/whipcracker/.sickrage/medusa/server/core.py", line 17, in <module>
from .web import CalendarHandler, KeyHandler, LoginHandler, LogoutHandler
File "/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/__init__.py", line 7, in <module>
from .home import Home, HomeAddShows, HomeChangeLog, HomeIRC, HomeNews, HomePostProcess
File "/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/home/__init__.py", line 4, in <module>
from .add_shows import HomeAddShows
File "/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/home/add_shows.py", line 29, in <module>
from ....show.recommendations.trakt import TraktPopular
File "/media/sdq1/home/whipcracker/.sickrage/medusa/show/recommendations/trakt.py", line 170, in <module>
tvdb_api_v2 = get_tvdbv2_api()
File "/media/sdq1/home/whipcracker/.sickrage/medusa/show/recommendations/trakt.py", line 41, in get_tvdbv2_api
access_token = auth_api.login_post(authentication_string)
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/apis/authentication_api.py", line 124, in login_post
callback=params.get('callback'))
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py", line 324, in call_api
response_type, auth_settings, callback)
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py", line 150, in __call_api
post_params=post_params, body=body)
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py", line 360, in request
body=body)
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/rest.py", line 208, in POST
body=body)
File "/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/rest.py", line 143, in request
headers=headers)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/request.py", line 73, in request
**urlopen_kw)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/request.py", line 151, in request_encode_body
return self.urlopen(method, url, **extra_kw)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/poolmanager.py", line 165, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py", line 578, in urlopen
chunked=chunked)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py", line 351, in _make_request
self._validate_conn(conn)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py", line 814, in _validate_conn
conn.connect()
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connection.py", line 289, in connect
ssl_version=resolved_ssl_version)
File "/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/contrib/pyopenssl.py", line 328, in ssl_wrap_socket
ctx.load_verify_locations(ca_certs, ca_cert_dir)
File "/usr/lib/python2.7/dist-packages/OpenSSL/SSL.py", line 303, in load_verify_locations
raise TypeError("cafile must be None or a byte string")
TypeError: cafile must be None or a byte string
```
I've tried deleting the folder and tried a new git clone, but I have the same issue. Never been a problem until the new update. Hope someone can help!
--- END ISSUE ---
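The traceback above fails inside `get_tvdbv2_api()` while the module is being imported: `trakt.py` builds the TVDB client at module level, so any SSL/certificate problem aborts Medusa before the web server even starts. A minimal sketch of deferring that call until it is first needed (the helper name mirrors the module shown below; the caching variable is an assumption for illustration, not Medusa's actual code):
```python
# Sketch only: build the TVDB client lazily instead of at import time.
# get_tvdbv2_api() is the helper defined in medusa/show/recommendations/trakt.py;
# the module-level cache below is illustrative.
_tvdb_api_v2 = None


def tvdb_api():
    """Return a cached SeriesApi client, creating it on first use."""
    global _tvdb_api_v2
    if _tvdb_api_v2 is None:
        _tvdb_api_v2 = get_tvdbv2_api()
    return _tvdb_api_v2
```
With lazy initialization, importing the module stays side-effect free, and the SSL error (if it still occurs) surfaces when a recommendation list is actually requested rather than at startup.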
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/show/recommendations/trakt.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Medusa.
4 #
5 # Medusa is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Medusa is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Medusa. If not, see <http://www.gnu.org/licenses/>.
17 from __future__ import unicode_literals
18
19 import medusa as app
20 import requests
21 from simpleanidb import Anidb
22 from traktor import (TokenExpiredException, TraktApi, TraktException)
23 from tvdbapiv2 import (ApiClient, AuthenticationApi, SeriesApi)
24 from .recommended import RecommendedShow
25 from ... import logger
26 from ...helper.common import try_int
27 from ...helper.exceptions import MultipleShowObjectsException, ex
28 from ...indexers.indexer_config import INDEXER_TVDBV2
29
30
31 def get_tvdbv2_api():
32 """Initiate the tvdb api v2."""
33 api_base_url = 'https://api.thetvdb.com'
34
35 # client_id = 'username' # (optional! Only required for the /user routes)
36 # client_secret = 'pass' # (optional! Only required for the /user routes)
37 apikey = '0629B785CE550C8D'
38
39 authentication_string = {'apikey': apikey, 'username': '', 'userpass': ''}
40 unauthenticated_client = ApiClient(api_base_url)
41 auth_api = AuthenticationApi(unauthenticated_client)
42 access_token = auth_api.login_post(authentication_string)
43 auth_client = ApiClient(api_base_url, 'Authorization', 'Bearer ' + access_token.token)
44 series_api = SeriesApi(auth_client)
45
46 return series_api
47
48
49 class TraktPopular(object):
50 """This class retrieves a speficed recommended show list from Trakt.
51
52 The list of returned shows is mapped to a RecommendedShow object
53 """
54
55 def __init__(self):
56 """Initialize the trakt recommended list object."""
57 self.cache_subfolder = __name__.split('.')[-1] if '.' in __name__ else __name__
58 self.session = requests.Session()
59 self.recommender = "Trakt Popular"
60 self.default_img_src = 'trakt-default.png'
61 self.anidb = Anidb(cache_dir=app.CACHE_DIR)
62
63 def _create_recommended_show(self, show_obj):
64 """Create the RecommendedShow object from the returned showobj."""
65 rec_show = RecommendedShow(self,
66 show_obj['show']['ids'], show_obj['show']['title'],
67 INDEXER_TVDBV2, # indexer
68 show_obj['show']['ids']['tvdb'],
69 **{'rating': show_obj['show']['rating'],
70 'votes': try_int(show_obj['show']['votes'], '0'),
71 'image_href': 'http://www.trakt.tv/shows/{0}'.format(show_obj['show']['ids']['slug']),
72 # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',
73 # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}
74 'ids': show_obj['show']['ids']
75 }
76 )
77
78 use_default = None
79 image = None
80 try:
81 image = tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name
82 except Exception:
83 use_default = self.default_img_src
84 logger.log('Missing poster on TheTVDB for show %s' % (show_obj['show']['title']), logger.DEBUG)
85
86 rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)
87 # As the method below requires allot of resources, i've only enabled it when
88 # the shows language or country is 'jp' (japanese). Looks a litle bit akward,
89 # but alternative is allot of resource used
90 if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:
91 rec_show.check_if_anime(self.anidb, show_obj['show']['ids']['tvdb'])
92
93 return rec_show
94
95 @staticmethod
96 def fetch_and_refresh_token(trakt_api, path):
97 """Fetch shows from trakt and store the refresh token when needed."""
98 try:
99 library_shows = trakt_api.request(path) or []
100 if trakt_api.access_token_refreshed:
101 app.TRAKT_ACCESS_TOKEN = trakt_api.access_token
102 app.TRAKT_REFRESH_TOKEN = trakt_api.refresh_token
103 except TokenExpiredException:
104 app.TRAKT_ACCESS_TOKEN = ''
105 raise
106
107 return library_shows
108
109 def fetch_popular_shows(self, page_url=None, trakt_list=None): # pylint: disable=too-many-nested-blocks,too-many-branches
110 """Get a list of popular shows from different Trakt lists based on a provided trakt_list.
111
112 :param page_url: the page url opened to the base api url, for retreiving a specific list
113 :param trakt_list: a description of the trakt list
114 :return: A list of RecommendedShow objects, an empty list of none returned
115 :throw: ``Exception`` if an Exception is thrown not handled by the libtrats exceptions
116 """
117 trending_shows = []
118 removed_from_medusa = []
119
120 # Create a trakt settings dict
121 trakt_settings = {'trakt_api_secret': app.TRAKT_API_SECRET, 'trakt_api_key': app.TRAKT_API_KEY,
122 'trakt_access_token': app.TRAKT_ACCESS_TOKEN}
123
124 trakt_api = TraktApi(timeout=app.TRAKT_TIMEOUT, ssl_verify=app.SSL_VERIFY, **trakt_settings)
125
126 try: # pylint: disable=too-many-nested-blocks
127 not_liked_show = ''
128 if app.TRAKT_ACCESS_TOKEN != '':
129 library_shows = self.fetch_and_refresh_token(trakt_api, 'sync/watched/shows?extended=noseasons') + \
130 self.fetch_and_refresh_token(trakt_api, 'sync/collection/shows?extended=full')
131
132 medusa_shows = [show.indexerid for show in app.showList if show.indexerid]
133 removed_from_medusa = [lshow['show']['ids']['tvdb'] for lshow in library_shows if lshow['show']['ids']['tvdb'] not in medusa_shows]
134
135 if app.TRAKT_BLACKLIST_NAME is not None and app.TRAKT_BLACKLIST_NAME:
136 not_liked_show = trakt_api.request('users/' + app.TRAKT_USERNAME + '/lists/' +
137 app.TRAKT_BLACKLIST_NAME + '/items') or []
138 else:
139 logger.log('Trakt blacklist name is empty', logger.DEBUG)
140
141 if trakt_list not in ['recommended', 'newshow', 'newseason']:
142 limit_show = '?limit=' + str(100 + len(not_liked_show)) + '&'
143 else:
144 limit_show = '?'
145
146 shows = self.fetch_and_refresh_token(trakt_api, page_url + limit_show + 'extended=full,images') or []
147
148 for show in shows:
149 try:
150 if 'show' not in show:
151 show['show'] = show
152
153 if not_liked_show:
154 if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb']
155 for show in not_liked_show if show['type'] == 'show'):
156 trending_shows.append(self._create_recommended_show(show))
157 else:
158 trending_shows.append(self._create_recommended_show(show))
159
160 except MultipleShowObjectsException:
161 continue
162
163 blacklist = app.TRAKT_BLACKLIST_NAME not in ''
164
165 except TraktException as e:
166 logger.log('Could not connect to Trakt service: %s' % ex(e), logger.WARNING)
167 raise
168
169 return blacklist, trending_shows, removed_from_medusa
170
171 tvdb_api_v2 = get_tvdbv2_api()
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/show/recommendations/trakt.py b/medusa/show/recommendations/trakt.py
--- a/medusa/show/recommendations/trakt.py
+++ b/medusa/show/recommendations/trakt.py
@@ -59,6 +59,7 @@
self.recommender = "Trakt Popular"
self.default_img_src = 'trakt-default.png'
self.anidb = Anidb(cache_dir=app.CACHE_DIR)
+ self.tvdb_api_v2 = get_tvdbv2_api()
def _create_recommended_show(self, show_obj):
"""Create the RecommendedShow object from the returned showobj."""
@@ -78,7 +79,7 @@
use_default = None
image = None
try:
- image = tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name
+ image = self.tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name
except Exception:
use_default = self.default_img_src
logger.log('Missing poster on TheTVDB for show %s' % (show_obj['show']['title']), logger.DEBUG)
@@ -167,5 +168,3 @@
raise
return blacklist, trending_shows, removed_from_medusa
-
-tvdb_api_v2 = get_tvdbv2_api()
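With this patch the TVDB client is created per `TraktPopular` instance instead of at import time, so importing `medusa.show.recommendations.trakt` no longer performs network or SSL work. A rough usage sketch (the arguments are illustrative, not taken from Medusa):
```python
from medusa.show.recommendations.trakt import TraktPopular  # import is now side-effect free

popular = TraktPopular()  # TVDB client is built here, once per instance
blacklist, shows, removed = popular.fetch_popular_shows(
    page_url="shows/popular", trakt_list="popular")  # illustrative arguments
```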
| {"golden_diff": "diff --git a/medusa/show/recommendations/trakt.py b/medusa/show/recommendations/trakt.py\n--- a/medusa/show/recommendations/trakt.py\n+++ b/medusa/show/recommendations/trakt.py\n@@ -59,6 +59,7 @@\n self.recommender = \"Trakt Popular\"\n self.default_img_src = 'trakt-default.png'\n self.anidb = Anidb(cache_dir=app.CACHE_DIR)\n+ self.tvdb_api_v2 = get_tvdbv2_api()\n \n def _create_recommended_show(self, show_obj):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n@@ -78,7 +79,7 @@\n use_default = None\n image = None\n try:\n- image = tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n+ image = self.tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n except Exception:\n use_default = self.default_img_src\n logger.log('Missing poster on TheTVDB for show %s' % (show_obj['show']['title']), logger.DEBUG)\n@@ -167,5 +168,3 @@\n raise\n \n return blacklist, trending_shows, removed_from_medusa\n-\n-tvdb_api_v2 = get_tvdbv2_api()\n", "issue": "TypeError: cafile must be None or a byte string?\n### Before submitting your issue:\n\nEnable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)\n\nBranch/Commit: Latest (Saturday 29th Oct)\nOS: Debian Linux (FeralHosting Seedbox)\nWhat you did: Updated to the latest commit\nWhat happened: Medusa won't run - spits out this log (below)\nWhat you expected: For it to run\nLogs:\n\n```\n python ~/.sickrage/SickBeard.py -d -p 10101\nTraceback (most recent call last):\n File \"/media/sdq1/home/whipcracker/.sickrage/SickBeard.py\", line 9, in <module>\n from start import Application\n File \"/media/sdq1/home/whipcracker/.sickrage/start.py\", line 104, in <module>\n from medusa.server.core import AppWebServer\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/server/core.py\", line 17, in <module>\n from .web import CalendarHandler, KeyHandler, LoginHandler, LogoutHandler\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/__init__.py\", line 7, in <module>\n from .home import Home, HomeAddShows, HomeChangeLog, HomeIRC, HomeNews, HomePostProcess\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/home/__init__.py\", line 4, in <module>\n from .add_shows import HomeAddShows\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/server/web/home/add_shows.py\", line 29, in <module>\n from ....show.recommendations.trakt import TraktPopular\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/show/recommendations/trakt.py\", line 170, in <module>\n tvdb_api_v2 = get_tvdbv2_api()\n File \"/media/sdq1/home/whipcracker/.sickrage/medusa/show/recommendations/trakt.py\", line 41, in get_tvdbv2_api\n access_token = auth_api.login_post(authentication_string)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/apis/authentication_api.py\", line 124, in login_post\n callback=params.get('callback'))\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py\", line 324, in call_api\n response_type, auth_settings, callback)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py\", line 150, in __call_api\n post_params=post_params, body=body)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/api_client.py\", line 360, in request\n body=body)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/rest.py\", line 208, in POST\n body=body)\n File 
\"/media/sdq1/home/whipcracker/.sickrage/lib/tvdbapiv2/rest.py\", line 143, in request\n headers=headers)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/request.py\", line 73, in request\n **urlopen_kw)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/request.py\", line 151, in request_encode_body\n return self.urlopen(method, url, **extra_kw)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/poolmanager.py\", line 165, in urlopen\n response = conn.urlopen(method, u.request_uri, **kw)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py\", line 578, in urlopen\n chunked=chunked)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py\", line 351, in _make_request\n self._validate_conn(conn)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connectionpool.py\", line 814, in _validate_conn\n conn.connect()\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/connection.py\", line 289, in connect\n ssl_version=resolved_ssl_version)\n File \"/media/sdq1/home/whipcracker/.sickrage/lib/requests/packages/urllib3/contrib/pyopenssl.py\", line 328, in ssl_wrap_socket\n ctx.load_verify_locations(ca_certs, ca_cert_dir)\n File \"/usr/lib/python2.7/dist-packages/OpenSSL/SSL.py\", line 303, in load_verify_locations\n raise TypeError(\"cafile must be None or a byte string\")\nTypeError: cafile must be None or a byte string\n\n```\n\nI've tried deleting the folder and tried a new git clone, but I have the same issue. Never been a problem until the new update. Hope someone can help!\n\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Medusa.\n#\n# Medusa is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Medusa is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Medusa. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import unicode_literals\n\nimport medusa as app\nimport requests\nfrom simpleanidb import Anidb\nfrom traktor import (TokenExpiredException, TraktApi, TraktException)\nfrom tvdbapiv2 import (ApiClient, AuthenticationApi, SeriesApi)\nfrom .recommended import RecommendedShow\nfrom ... import logger\nfrom ...helper.common import try_int\nfrom ...helper.exceptions import MultipleShowObjectsException, ex\nfrom ...indexers.indexer_config import INDEXER_TVDBV2\n\n\ndef get_tvdbv2_api():\n \"\"\"Initiate the tvdb api v2.\"\"\"\n api_base_url = 'https://api.thetvdb.com'\n\n # client_id = 'username' # (optional! Only required for the /user routes)\n # client_secret = 'pass' # (optional! 
Only required for the /user routes)\n apikey = '0629B785CE550C8D'\n\n authentication_string = {'apikey': apikey, 'username': '', 'userpass': ''}\n unauthenticated_client = ApiClient(api_base_url)\n auth_api = AuthenticationApi(unauthenticated_client)\n access_token = auth_api.login_post(authentication_string)\n auth_client = ApiClient(api_base_url, 'Authorization', 'Bearer ' + access_token.token)\n series_api = SeriesApi(auth_client)\n\n return series_api\n\n\nclass TraktPopular(object):\n \"\"\"This class retrieves a speficed recommended show list from Trakt.\n\n The list of returned shows is mapped to a RecommendedShow object\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the trakt recommended list object.\"\"\"\n self.cache_subfolder = __name__.split('.')[-1] if '.' in __name__ else __name__\n self.session = requests.Session()\n self.recommender = \"Trakt Popular\"\n self.default_img_src = 'trakt-default.png'\n self.anidb = Anidb(cache_dir=app.CACHE_DIR)\n\n def _create_recommended_show(self, show_obj):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n rec_show = RecommendedShow(self,\n show_obj['show']['ids'], show_obj['show']['title'],\n INDEXER_TVDBV2, # indexer\n show_obj['show']['ids']['tvdb'],\n **{'rating': show_obj['show']['rating'],\n 'votes': try_int(show_obj['show']['votes'], '0'),\n 'image_href': 'http://www.trakt.tv/shows/{0}'.format(show_obj['show']['ids']['slug']),\n # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',\n # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}\n 'ids': show_obj['show']['ids']\n }\n )\n\n use_default = None\n image = None\n try:\n image = tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n except Exception:\n use_default = self.default_img_src\n logger.log('Missing poster on TheTVDB for show %s' % (show_obj['show']['title']), logger.DEBUG)\n\n rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)\n # As the method below requires allot of resources, i've only enabled it when\n # the shows language or country is 'jp' (japanese). 
Looks a litle bit akward,\n # but alternative is allot of resource used\n if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:\n rec_show.check_if_anime(self.anidb, show_obj['show']['ids']['tvdb'])\n\n return rec_show\n\n @staticmethod\n def fetch_and_refresh_token(trakt_api, path):\n \"\"\"Fetch shows from trakt and store the refresh token when needed.\"\"\"\n try:\n library_shows = trakt_api.request(path) or []\n if trakt_api.access_token_refreshed:\n app.TRAKT_ACCESS_TOKEN = trakt_api.access_token\n app.TRAKT_REFRESH_TOKEN = trakt_api.refresh_token\n except TokenExpiredException:\n app.TRAKT_ACCESS_TOKEN = ''\n raise\n\n return library_shows\n\n def fetch_popular_shows(self, page_url=None, trakt_list=None): # pylint: disable=too-many-nested-blocks,too-many-branches\n \"\"\"Get a list of popular shows from different Trakt lists based on a provided trakt_list.\n\n :param page_url: the page url opened to the base api url, for retreiving a specific list\n :param trakt_list: a description of the trakt list\n :return: A list of RecommendedShow objects, an empty list of none returned\n :throw: ``Exception`` if an Exception is thrown not handled by the libtrats exceptions\n \"\"\"\n trending_shows = []\n removed_from_medusa = []\n\n # Create a trakt settings dict\n trakt_settings = {'trakt_api_secret': app.TRAKT_API_SECRET, 'trakt_api_key': app.TRAKT_API_KEY,\n 'trakt_access_token': app.TRAKT_ACCESS_TOKEN}\n\n trakt_api = TraktApi(timeout=app.TRAKT_TIMEOUT, ssl_verify=app.SSL_VERIFY, **trakt_settings)\n\n try: # pylint: disable=too-many-nested-blocks\n not_liked_show = ''\n if app.TRAKT_ACCESS_TOKEN != '':\n library_shows = self.fetch_and_refresh_token(trakt_api, 'sync/watched/shows?extended=noseasons') + \\\n self.fetch_and_refresh_token(trakt_api, 'sync/collection/shows?extended=full')\n\n medusa_shows = [show.indexerid for show in app.showList if show.indexerid]\n removed_from_medusa = [lshow['show']['ids']['tvdb'] for lshow in library_shows if lshow['show']['ids']['tvdb'] not in medusa_shows]\n\n if app.TRAKT_BLACKLIST_NAME is not None and app.TRAKT_BLACKLIST_NAME:\n not_liked_show = trakt_api.request('users/' + app.TRAKT_USERNAME + '/lists/' +\n app.TRAKT_BLACKLIST_NAME + '/items') or []\n else:\n logger.log('Trakt blacklist name is empty', logger.DEBUG)\n\n if trakt_list not in ['recommended', 'newshow', 'newseason']:\n limit_show = '?limit=' + str(100 + len(not_liked_show)) + '&'\n else:\n limit_show = '?'\n\n shows = self.fetch_and_refresh_token(trakt_api, page_url + limit_show + 'extended=full,images') or []\n\n for show in shows:\n try:\n if 'show' not in show:\n show['show'] = show\n\n if not_liked_show:\n if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb']\n for show in not_liked_show if show['type'] == 'show'):\n trending_shows.append(self._create_recommended_show(show))\n else:\n trending_shows.append(self._create_recommended_show(show))\n\n except MultipleShowObjectsException:\n continue\n\n blacklist = app.TRAKT_BLACKLIST_NAME not in ''\n\n except TraktException as e:\n logger.log('Could not connect to Trakt service: %s' % ex(e), logger.WARNING)\n raise\n\n return blacklist, trending_shows, removed_from_medusa\n\ntvdb_api_v2 = get_tvdbv2_api()\n", "path": "medusa/show/recommendations/trakt.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Medusa.\n#\n# Medusa is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, 
either version 3 of the License, or\n# (at your option) any later version.\n#\n# Medusa is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Medusa. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import unicode_literals\n\nimport medusa as app\nimport requests\nfrom simpleanidb import Anidb\nfrom traktor import (TokenExpiredException, TraktApi, TraktException)\nfrom tvdbapiv2 import (ApiClient, AuthenticationApi, SeriesApi)\nfrom .recommended import RecommendedShow\nfrom ... import logger\nfrom ...helper.common import try_int\nfrom ...helper.exceptions import MultipleShowObjectsException, ex\nfrom ...indexers.indexer_config import INDEXER_TVDBV2\n\n\ndef get_tvdbv2_api():\n \"\"\"Initiate the tvdb api v2.\"\"\"\n api_base_url = 'https://api.thetvdb.com'\n\n # client_id = 'username' # (optional! Only required for the /user routes)\n # client_secret = 'pass' # (optional! Only required for the /user routes)\n apikey = '0629B785CE550C8D'\n\n authentication_string = {'apikey': apikey, 'username': '', 'userpass': ''}\n unauthenticated_client = ApiClient(api_base_url)\n auth_api = AuthenticationApi(unauthenticated_client)\n access_token = auth_api.login_post(authentication_string)\n auth_client = ApiClient(api_base_url, 'Authorization', 'Bearer ' + access_token.token)\n series_api = SeriesApi(auth_client)\n\n return series_api\n\n\nclass TraktPopular(object):\n \"\"\"This class retrieves a speficed recommended show list from Trakt.\n\n The list of returned shows is mapped to a RecommendedShow object\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the trakt recommended list object.\"\"\"\n self.cache_subfolder = __name__.split('.')[-1] if '.' in __name__ else __name__\n self.session = requests.Session()\n self.recommender = \"Trakt Popular\"\n self.default_img_src = 'trakt-default.png'\n self.anidb = Anidb(cache_dir=app.CACHE_DIR)\n self.tvdb_api_v2 = get_tvdbv2_api()\n\n def _create_recommended_show(self, show_obj):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n rec_show = RecommendedShow(self,\n show_obj['show']['ids'], show_obj['show']['title'],\n INDEXER_TVDBV2, # indexer\n show_obj['show']['ids']['tvdb'],\n **{'rating': show_obj['show']['rating'],\n 'votes': try_int(show_obj['show']['votes'], '0'),\n 'image_href': 'http://www.trakt.tv/shows/{0}'.format(show_obj['show']['ids']['slug']),\n # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',\n # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}\n 'ids': show_obj['show']['ids']\n }\n )\n\n use_default = None\n image = None\n try:\n image = self.tvdb_api_v2.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n except Exception:\n use_default = self.default_img_src\n logger.log('Missing poster on TheTVDB for show %s' % (show_obj['show']['title']), logger.DEBUG)\n\n rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)\n # As the method below requires allot of resources, i've only enabled it when\n # the shows language or country is 'jp' (japanese). 
Looks a litle bit akward,\n # but alternative is allot of resource used\n if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:\n rec_show.check_if_anime(self.anidb, show_obj['show']['ids']['tvdb'])\n\n return rec_show\n\n @staticmethod\n def fetch_and_refresh_token(trakt_api, path):\n \"\"\"Fetch shows from trakt and store the refresh token when needed.\"\"\"\n try:\n library_shows = trakt_api.request(path) or []\n if trakt_api.access_token_refreshed:\n app.TRAKT_ACCESS_TOKEN = trakt_api.access_token\n app.TRAKT_REFRESH_TOKEN = trakt_api.refresh_token\n except TokenExpiredException:\n app.TRAKT_ACCESS_TOKEN = ''\n raise\n\n return library_shows\n\n def fetch_popular_shows(self, page_url=None, trakt_list=None): # pylint: disable=too-many-nested-blocks,too-many-branches\n \"\"\"Get a list of popular shows from different Trakt lists based on a provided trakt_list.\n\n :param page_url: the page url opened to the base api url, for retreiving a specific list\n :param trakt_list: a description of the trakt list\n :return: A list of RecommendedShow objects, an empty list of none returned\n :throw: ``Exception`` if an Exception is thrown not handled by the libtrats exceptions\n \"\"\"\n trending_shows = []\n removed_from_medusa = []\n\n # Create a trakt settings dict\n trakt_settings = {'trakt_api_secret': app.TRAKT_API_SECRET, 'trakt_api_key': app.TRAKT_API_KEY,\n 'trakt_access_token': app.TRAKT_ACCESS_TOKEN}\n\n trakt_api = TraktApi(timeout=app.TRAKT_TIMEOUT, ssl_verify=app.SSL_VERIFY, **trakt_settings)\n\n try: # pylint: disable=too-many-nested-blocks\n not_liked_show = ''\n if app.TRAKT_ACCESS_TOKEN != '':\n library_shows = self.fetch_and_refresh_token(trakt_api, 'sync/watched/shows?extended=noseasons') + \\\n self.fetch_and_refresh_token(trakt_api, 'sync/collection/shows?extended=full')\n\n medusa_shows = [show.indexerid for show in app.showList if show.indexerid]\n removed_from_medusa = [lshow['show']['ids']['tvdb'] for lshow in library_shows if lshow['show']['ids']['tvdb'] not in medusa_shows]\n\n if app.TRAKT_BLACKLIST_NAME is not None and app.TRAKT_BLACKLIST_NAME:\n not_liked_show = trakt_api.request('users/' + app.TRAKT_USERNAME + '/lists/' +\n app.TRAKT_BLACKLIST_NAME + '/items') or []\n else:\n logger.log('Trakt blacklist name is empty', logger.DEBUG)\n\n if trakt_list not in ['recommended', 'newshow', 'newseason']:\n limit_show = '?limit=' + str(100 + len(not_liked_show)) + '&'\n else:\n limit_show = '?'\n\n shows = self.fetch_and_refresh_token(trakt_api, page_url + limit_show + 'extended=full,images') or []\n\n for show in shows:\n try:\n if 'show' not in show:\n show['show'] = show\n\n if not_liked_show:\n if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb']\n for show in not_liked_show if show['type'] == 'show'):\n trending_shows.append(self._create_recommended_show(show))\n else:\n trending_shows.append(self._create_recommended_show(show))\n\n except MultipleShowObjectsException:\n continue\n\n blacklist = app.TRAKT_BLACKLIST_NAME not in ''\n\n except TraktException as e:\n logger.log('Could not connect to Trakt service: %s' % ex(e), logger.WARNING)\n raise\n\n return blacklist, trending_shows, removed_from_medusa\n", "path": "medusa/show/recommendations/trakt.py"}]} | 3,782 | 330 |
gh_patches_debug_41563 | rasdani/github-patches | git_diff | ray-project__ray-5221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log monitor reports UnicodeDecodeError
<!--
General questions should be asked on the mailing list [email protected].
Questions about how to use Ray should be asked on
[StackOverflow](https://stackoverflow.com/questions/tagged/ray).
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: centos 7
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.7.2 0.8
- **Python version**: python 3.7
- **Exact command to reproduce**:
<!--
You can obtain the Ray version with
python -c "import ray; print(ray.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
Now the `log_monitor` monitors all the files under logs. This can cause the following error when we open those log files with `VIM`, because `VIM` creates a `.*.swap` swap file that the monitor then tries to read.
```
Traceback (most recent call last):
File "/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 278, in <module>
raise e
File "/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 268, in <module>
log_monitor.run()
File "/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 219, in run
anything_published = self.check_log_files_and_publish_updates()
File "/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 175, in check_log_files_and_publish_updates
next_line = file_info.file_handle.readline()
File "/home/xianyang/anaconda3/lib/python3.7/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xca in position 21: invalid continuation byte
```
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
--- END ISSUE ---
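The decode error above can be reproduced independently of Ray: any non-UTF-8 file sitting in the monitored logs directory (for example a Vim swap file) trips the default text-mode reader used by `log_monitor.py`. A stand-alone sketch, assuming a UTF-8 default encoding (the file name and byte values are made up):
```python
# Illustrative reproduction of the UnicodeDecodeError, outside Ray.
import tempfile

with tempfile.NamedTemporaryFile(suffix=".log.swp", delete=False) as f:
    f.write(b"\xca\xfe not valid utf-8")  # 0xca matches the byte in the traceback
    path = f.name

try:
    with open(path, "r") as handle:  # same text-mode readline pattern as log_monitor.py
        handle.readline()
except UnicodeDecodeError as err:
    print("reproduced:", err)
```
Restricting the monitored files to Ray's own `worker*.out` / `worker*.err` logs (or opening them with an explicit error handler) avoids reading such stray binary files.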
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/log_monitor.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import argparse
6 import errno
7 import json
8 import logging
9 import os
10 import time
11 import traceback
12
13 import ray.ray_constants as ray_constants
14 import ray.services as services
15 import ray.utils
16
17 # Logger for this module. It should be configured at the entry point
18 # into the program using Ray. Ray provides a default configuration at
19 # entry/init points.
20 logger = logging.getLogger(__name__)
21
22
23 class LogFileInfo(object):
24 def __init__(self,
25 filename=None,
26 size_when_last_opened=None,
27 file_position=None,
28 file_handle=None):
29 assert (filename is not None and size_when_last_opened is not None
30 and file_position is not None)
31 self.filename = filename
32 self.size_when_last_opened = size_when_last_opened
33 self.file_position = file_position
34 self.file_handle = file_handle
35 self.worker_pid = None
36
37
38 class LogMonitor(object):
39 """A monitor process for monitoring Ray log files.
40
41 This class mantains a list of open files and a list of closed log files. We
42 can't simply leave all files open because we'll run out of file
43 descriptors.
44
45 The "run" method of this class will cycle between doing several things:
46 1. First, it will check if any new files have appeared in the log
47 directory. If so, they will be added to the list of closed files.
48 2. Then, if we are unable to open any new files, we will close all of the
49 files.
50 3. Then, we will open as many closed files as we can that may have new
51 lines (judged by an increase in file size since the last time the file
52 was opened).
53 4. Then we will loop through the open files and see if there are any new
54 lines in the file. If so, we will publish them to Redis.
55
56 Attributes:
57 host (str): The hostname of this machine. Used to improve the log
58 messages published to Redis.
59 logs_dir (str): The directory that the log files are in.
60 redis_client: A client used to communicate with the Redis server.
61 log_filenames (set): This is the set of filenames of all files in
62 open_file_infos and closed_file_infos.
63 open_file_infos (list[LogFileInfo]): Info for all of the open files.
64 closed_file_infos (list[LogFileInfo]): Info for all of the closed
65 files.
66 can_open_more_files (bool): True if we can still open more files and
67 false otherwise.
68 """
69
70 def __init__(self, logs_dir, redis_address, redis_password=None):
71 """Initialize the log monitor object."""
72 self.ip = services.get_node_ip_address()
73 self.logs_dir = logs_dir
74 self.redis_client = ray.services.create_redis_client(
75 redis_address, password=redis_password)
76 self.log_filenames = set()
77 self.open_file_infos = []
78 self.closed_file_infos = []
79 self.can_open_more_files = True
80
81 def close_all_files(self):
82 """Close all open files (so that we can open more)."""
83 while len(self.open_file_infos) > 0:
84 file_info = self.open_file_infos.pop(0)
85 file_info.file_handle.close()
86 file_info.file_handle = None
87 self.closed_file_infos.append(file_info)
88 self.can_open_more_files = True
89
90 def update_log_filenames(self):
91 """Update the list of log files to monitor."""
92 log_filenames = os.listdir(self.logs_dir)
93
94 for log_filename in log_filenames:
95 full_path = os.path.join(self.logs_dir, log_filename)
96 if full_path not in self.log_filenames:
97 self.log_filenames.add(full_path)
98 self.closed_file_infos.append(
99 LogFileInfo(
100 filename=full_path,
101 size_when_last_opened=0,
102 file_position=0,
103 file_handle=None))
104 logger.info("Beginning to track file {}".format(log_filename))
105
106 def open_closed_files(self):
107 """Open some closed files if they may have new lines.
108
109 Opening more files may require us to close some of the already open
110 files.
111 """
112 if not self.can_open_more_files:
113 # If we can't open any more files. Close all of the files.
114 self.close_all_files()
115
116 files_with_no_updates = []
117 while len(self.closed_file_infos) > 0:
118 if (len(self.open_file_infos) >=
119 ray_constants.LOG_MONITOR_MAX_OPEN_FILES):
120 self.can_open_more_files = False
121 break
122
123 file_info = self.closed_file_infos.pop(0)
124 assert file_info.file_handle is None
125 # Get the file size to see if it has gotten bigger since we last
126 # opened it.
127 try:
128 file_size = os.path.getsize(file_info.filename)
129 except (IOError, OSError) as e:
130 # Catch "file not found" errors.
131 if e.errno == errno.ENOENT:
132 logger.warning("Warning: The file {} was not "
133 "found.".format(file_info.filename))
134 self.log_filenames.remove(file_info.filename)
135 continue
136 raise e
137
138 # If some new lines have been added to this file, try to reopen the
139 # file.
140 if file_size > file_info.size_when_last_opened:
141 try:
142 f = open(file_info.filename, "r")
143 except (IOError, OSError) as e:
144 if e.errno == errno.ENOENT:
145 logger.warning("Warning: The file {} was not "
146 "found.".format(file_info.filename))
147 self.log_filenames.remove(file_info.filename)
148 continue
149 else:
150 raise e
151
152 f.seek(file_info.file_position)
153 file_info.filesize_when_last_opened = file_size
154 file_info.file_handle = f
155 self.open_file_infos.append(file_info)
156 else:
157 files_with_no_updates.append(file_info)
158
159 # Add the files with no changes back to the list of closed files.
160 self.closed_file_infos += files_with_no_updates
161
162 def check_log_files_and_publish_updates(self):
163 """Get any changes to the log files and push updates to Redis.
164
165 Returns:
166 True if anything was published and false otherwise.
167 """
168 anything_published = False
169 for file_info in self.open_file_infos:
170 assert not file_info.file_handle.closed
171
172 lines_to_publish = []
173 max_num_lines_to_read = 100
174 for _ in range(max_num_lines_to_read):
175 next_line = file_info.file_handle.readline()
176 if next_line == "":
177 break
178 if next_line[-1] == "\n":
179 next_line = next_line[:-1]
180 lines_to_publish.append(next_line)
181
182 # Publish the lines if this is a worker process.
183 filename = file_info.filename.split("/")[-1]
184 is_worker = (filename.startswith("worker")
185 and (filename.endswith("out")
186 or filename.endswith("err")))
187
188 if is_worker and file_info.file_position == 0:
189 if (len(lines_to_publish) > 0 and
190 lines_to_publish[0].startswith("Ray worker pid: ")):
191 file_info.worker_pid = int(
192 lines_to_publish[0].split(" ")[-1])
193 lines_to_publish = lines_to_publish[1:]
194
195 # Record the current position in the file.
196 file_info.file_position = file_info.file_handle.tell()
197
198 if len(lines_to_publish) > 0 and is_worker:
199 self.redis_client.publish(
200 ray.gcs_utils.LOG_FILE_CHANNEL,
201 json.dumps({
202 "ip": self.ip,
203 "pid": file_info.worker_pid,
204 "lines": lines_to_publish
205 }))
206 anything_published = True
207
208 return anything_published
209
210 def run(self):
211 """Run the log monitor.
212
213 This will query Redis once every second to check if there are new log
214 files to monitor. It will also store those log files in Redis.
215 """
216 while True:
217 self.update_log_filenames()
218 self.open_closed_files()
219 anything_published = self.check_log_files_and_publish_updates()
220 # If nothing was published, then wait a little bit before checking
221 # for logs to avoid using too much CPU.
222 if not anything_published:
223 time.sleep(0.05)
224
225
226 if __name__ == "__main__":
227 parser = argparse.ArgumentParser(
228 description=("Parse Redis server for the "
229 "log monitor to connect "
230 "to."))
231 parser.add_argument(
232 "--redis-address",
233 required=True,
234 type=str,
235 help="The address to use for Redis.")
236 parser.add_argument(
237 "--redis-password",
238 required=False,
239 type=str,
240 default=None,
241 help="the password to use for Redis")
242 parser.add_argument(
243 "--logging-level",
244 required=False,
245 type=str,
246 default=ray_constants.LOGGER_LEVEL,
247 choices=ray_constants.LOGGER_LEVEL_CHOICES,
248 help=ray_constants.LOGGER_LEVEL_HELP)
249 parser.add_argument(
250 "--logging-format",
251 required=False,
252 type=str,
253 default=ray_constants.LOGGER_FORMAT,
254 help=ray_constants.LOGGER_FORMAT_HELP)
255 parser.add_argument(
256 "--logs-dir",
257 required=True,
258 type=str,
259 help="Specify the path of the temporary directory used by Ray "
260 "processes.")
261 args = parser.parse_args()
262 ray.utils.setup_logger(args.logging_level, args.logging_format)
263
264 log_monitor = LogMonitor(
265 args.logs_dir, args.redis_address, redis_password=args.redis_password)
266
267 try:
268 log_monitor.run()
269 except Exception as e:
270 # Something went wrong, so push an error to all drivers.
271 redis_client = ray.services.create_redis_client(
272 args.redis_address, password=args.redis_password)
273 traceback_str = ray.utils.format_error_message(traceback.format_exc())
274 message = ("The log monitor on node {} failed with the following "
275 "error:\n{}".format(os.uname()[1], traceback_str))
276 ray.utils.push_error_to_driver_through_redis(
277 redis_client, ray_constants.LOG_MONITOR_DIED_ERROR, message)
278 raise e
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/log_monitor.py b/python/ray/log_monitor.py
--- a/python/ray/log_monitor.py
+++ b/python/ray/log_monitor.py
@@ -4,6 +4,7 @@
import argparse
import errno
+import glob
import json
import logging
import os
@@ -89,18 +90,20 @@
def update_log_filenames(self):
"""Update the list of log files to monitor."""
- log_filenames = os.listdir(self.logs_dir)
-
- for log_filename in log_filenames:
- full_path = os.path.join(self.logs_dir, log_filename)
- if full_path not in self.log_filenames:
- self.log_filenames.add(full_path)
+ # we only monior worker log files
+ log_file_paths = glob.glob("{}/worker*[.out|.err]".format(
+ self.logs_dir))
+ for file_path in log_file_paths:
+ if os.path.isfile(
+ file_path) and file_path not in self.log_filenames:
+ self.log_filenames.add(file_path)
self.closed_file_infos.append(
LogFileInfo(
- filename=full_path,
+ filename=file_path,
size_when_last_opened=0,
file_position=0,
file_handle=None))
+ log_filename = os.path.basename(file_path)
logger.info("Beginning to track file {}".format(log_filename))
def open_closed_files(self):
@@ -172,20 +175,21 @@
lines_to_publish = []
max_num_lines_to_read = 100
for _ in range(max_num_lines_to_read):
- next_line = file_info.file_handle.readline()
- if next_line == "":
- break
- if next_line[-1] == "\n":
- next_line = next_line[:-1]
- lines_to_publish.append(next_line)
-
- # Publish the lines if this is a worker process.
- filename = file_info.filename.split("/")[-1]
- is_worker = (filename.startswith("worker")
- and (filename.endswith("out")
- or filename.endswith("err")))
-
- if is_worker and file_info.file_position == 0:
+ try:
+ next_line = file_info.file_handle.readline()
+ if next_line == "":
+ break
+ if next_line[-1] == "\n":
+ next_line = next_line[:-1]
+ lines_to_publish.append(next_line)
+ except Exception:
+ logger.error("Error: Reading file: {}, position: {} "
+ "failed.".format(
+ file_info.full_path,
+ file_info.file_info.file_handle.tell()))
+ raise
+
+ if file_info.file_position == 0:
if (len(lines_to_publish) > 0 and
lines_to_publish[0].startswith("Ray worker pid: ")):
file_info.worker_pid = int(
@@ -195,7 +199,7 @@
# Record the current position in the file.
file_info.file_position = file_info.file_handle.tell()
- if len(lines_to_publish) > 0 and is_worker:
+ if len(lines_to_publish) > 0:
self.redis_client.publish(
ray.gcs_utils.LOG_FILE_CHANNEL,
json.dumps({
| {"golden_diff": "diff --git a/python/ray/log_monitor.py b/python/ray/log_monitor.py\n--- a/python/ray/log_monitor.py\n+++ b/python/ray/log_monitor.py\n@@ -4,6 +4,7 @@\n \n import argparse\n import errno\n+import glob\n import json\n import logging\n import os\n@@ -89,18 +90,20 @@\n \n def update_log_filenames(self):\n \"\"\"Update the list of log files to monitor.\"\"\"\n- log_filenames = os.listdir(self.logs_dir)\n-\n- for log_filename in log_filenames:\n- full_path = os.path.join(self.logs_dir, log_filename)\n- if full_path not in self.log_filenames:\n- self.log_filenames.add(full_path)\n+ # we only monior worker log files\n+ log_file_paths = glob.glob(\"{}/worker*[.out|.err]\".format(\n+ self.logs_dir))\n+ for file_path in log_file_paths:\n+ if os.path.isfile(\n+ file_path) and file_path not in self.log_filenames:\n+ self.log_filenames.add(file_path)\n self.closed_file_infos.append(\n LogFileInfo(\n- filename=full_path,\n+ filename=file_path,\n size_when_last_opened=0,\n file_position=0,\n file_handle=None))\n+ log_filename = os.path.basename(file_path)\n logger.info(\"Beginning to track file {}\".format(log_filename))\n \n def open_closed_files(self):\n@@ -172,20 +175,21 @@\n lines_to_publish = []\n max_num_lines_to_read = 100\n for _ in range(max_num_lines_to_read):\n- next_line = file_info.file_handle.readline()\n- if next_line == \"\":\n- break\n- if next_line[-1] == \"\\n\":\n- next_line = next_line[:-1]\n- lines_to_publish.append(next_line)\n-\n- # Publish the lines if this is a worker process.\n- filename = file_info.filename.split(\"/\")[-1]\n- is_worker = (filename.startswith(\"worker\")\n- and (filename.endswith(\"out\")\n- or filename.endswith(\"err\")))\n-\n- if is_worker and file_info.file_position == 0:\n+ try:\n+ next_line = file_info.file_handle.readline()\n+ if next_line == \"\":\n+ break\n+ if next_line[-1] == \"\\n\":\n+ next_line = next_line[:-1]\n+ lines_to_publish.append(next_line)\n+ except Exception:\n+ logger.error(\"Error: Reading file: {}, position: {} \"\n+ \"failed.\".format(\n+ file_info.full_path,\n+ file_info.file_info.file_handle.tell()))\n+ raise\n+\n+ if file_info.file_position == 0:\n if (len(lines_to_publish) > 0 and\n lines_to_publish[0].startswith(\"Ray worker pid: \")):\n file_info.worker_pid = int(\n@@ -195,7 +199,7 @@\n # Record the current position in the file.\n file_info.file_position = file_info.file_handle.tell()\n \n- if len(lines_to_publish) > 0 and is_worker:\n+ if len(lines_to_publish) > 0:\n self.redis_client.publish(\n ray.gcs_utils.LOG_FILE_CHANNEL,\n json.dumps({\n", "issue": "log monitor report UnicodeDecodeError\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\nQuestions about how to use Ray should be asked on\r\n[StackOverflow](https://stackoverflow.com/questions/tagged/ray).\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: centos 7\r\n- **Ray installed from (source or binary)**: binay\r\n- **Ray version**: 0.7.2 0.8\r\n- **Python version**: python 3.7\r\n- **Exact command to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Ray version with\r\n\r\npython -c \"import ray; print(ray.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nNow the `log_monitor` monitor all the files under logs. This could be causing the following errors when we read those log file with `VIM` because of the `.*.swap` file will be created. 
\r\n```Traceback (most recent call last):\r\n File \"/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py\", line 278, in <module>\r\n raise e\r\n File \"/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py\", line 268, in <module>\r\n log_monitor.run()\r\n File \"/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py\", line 219, in run\r\n anything_published = self.check_log_files_and_publish_updates()\r\n File \"/home/xianyang/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py\", line 175, in check_log_files_and_publish_updates\r\n next_line = file_info.file_handle.readline()\r\n File \"/home/xianyang/anaconda3/lib/python3.7/codecs.py\", line 322, in decode\r\n (result, consumed) = self._buffer_decode(data, self.errors, final)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xca in position 21: invalid continuation byte\r\n\r\n```\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport errno\nimport json\nimport logging\nimport os\nimport time\nimport traceback\n\nimport ray.ray_constants as ray_constants\nimport ray.services as services\nimport ray.utils\n\n# Logger for this module. It should be configured at the entry point\n# into the program using Ray. Ray provides a default configuration at\n# entry/init points.\nlogger = logging.getLogger(__name__)\n\n\nclass LogFileInfo(object):\n def __init__(self,\n filename=None,\n size_when_last_opened=None,\n file_position=None,\n file_handle=None):\n assert (filename is not None and size_when_last_opened is not None\n and file_position is not None)\n self.filename = filename\n self.size_when_last_opened = size_when_last_opened\n self.file_position = file_position\n self.file_handle = file_handle\n self.worker_pid = None\n\n\nclass LogMonitor(object):\n \"\"\"A monitor process for monitoring Ray log files.\n\n This class mantains a list of open files and a list of closed log files. We\n can't simply leave all files open because we'll run out of file\n descriptors.\n\n The \"run\" method of this class will cycle between doing several things:\n 1. First, it will check if any new files have appeared in the log\n directory. If so, they will be added to the list of closed files.\n 2. Then, if we are unable to open any new files, we will close all of the\n files.\n 3. Then, we will open as many closed files as we can that may have new\n lines (judged by an increase in file size since the last time the file\n was opened).\n 4. Then we will loop through the open files and see if there are any new\n lines in the file. If so, we will publish them to Redis.\n\n Attributes:\n host (str): The hostname of this machine. 
Used to improve the log\n messages published to Redis.\n logs_dir (str): The directory that the log files are in.\n redis_client: A client used to communicate with the Redis server.\n log_filenames (set): This is the set of filenames of all files in\n open_file_infos and closed_file_infos.\n open_file_infos (list[LogFileInfo]): Info for all of the open files.\n closed_file_infos (list[LogFileInfo]): Info for all of the closed\n files.\n can_open_more_files (bool): True if we can still open more files and\n false otherwise.\n \"\"\"\n\n def __init__(self, logs_dir, redis_address, redis_password=None):\n \"\"\"Initialize the log monitor object.\"\"\"\n self.ip = services.get_node_ip_address()\n self.logs_dir = logs_dir\n self.redis_client = ray.services.create_redis_client(\n redis_address, password=redis_password)\n self.log_filenames = set()\n self.open_file_infos = []\n self.closed_file_infos = []\n self.can_open_more_files = True\n\n def close_all_files(self):\n \"\"\"Close all open files (so that we can open more).\"\"\"\n while len(self.open_file_infos) > 0:\n file_info = self.open_file_infos.pop(0)\n file_info.file_handle.close()\n file_info.file_handle = None\n self.closed_file_infos.append(file_info)\n self.can_open_more_files = True\n\n def update_log_filenames(self):\n \"\"\"Update the list of log files to monitor.\"\"\"\n log_filenames = os.listdir(self.logs_dir)\n\n for log_filename in log_filenames:\n full_path = os.path.join(self.logs_dir, log_filename)\n if full_path not in self.log_filenames:\n self.log_filenames.add(full_path)\n self.closed_file_infos.append(\n LogFileInfo(\n filename=full_path,\n size_when_last_opened=0,\n file_position=0,\n file_handle=None))\n logger.info(\"Beginning to track file {}\".format(log_filename))\n\n def open_closed_files(self):\n \"\"\"Open some closed files if they may have new lines.\n\n Opening more files may require us to close some of the already open\n files.\n \"\"\"\n if not self.can_open_more_files:\n # If we can't open any more files. 
Close all of the files.\n self.close_all_files()\n\n files_with_no_updates = []\n while len(self.closed_file_infos) > 0:\n if (len(self.open_file_infos) >=\n ray_constants.LOG_MONITOR_MAX_OPEN_FILES):\n self.can_open_more_files = False\n break\n\n file_info = self.closed_file_infos.pop(0)\n assert file_info.file_handle is None\n # Get the file size to see if it has gotten bigger since we last\n # opened it.\n try:\n file_size = os.path.getsize(file_info.filename)\n except (IOError, OSError) as e:\n # Catch \"file not found\" errors.\n if e.errno == errno.ENOENT:\n logger.warning(\"Warning: The file {} was not \"\n \"found.\".format(file_info.filename))\n self.log_filenames.remove(file_info.filename)\n continue\n raise e\n\n # If some new lines have been added to this file, try to reopen the\n # file.\n if file_size > file_info.size_when_last_opened:\n try:\n f = open(file_info.filename, \"r\")\n except (IOError, OSError) as e:\n if e.errno == errno.ENOENT:\n logger.warning(\"Warning: The file {} was not \"\n \"found.\".format(file_info.filename))\n self.log_filenames.remove(file_info.filename)\n continue\n else:\n raise e\n\n f.seek(file_info.file_position)\n file_info.filesize_when_last_opened = file_size\n file_info.file_handle = f\n self.open_file_infos.append(file_info)\n else:\n files_with_no_updates.append(file_info)\n\n # Add the files with no changes back to the list of closed files.\n self.closed_file_infos += files_with_no_updates\n\n def check_log_files_and_publish_updates(self):\n \"\"\"Get any changes to the log files and push updates to Redis.\n\n Returns:\n True if anything was published and false otherwise.\n \"\"\"\n anything_published = False\n for file_info in self.open_file_infos:\n assert not file_info.file_handle.closed\n\n lines_to_publish = []\n max_num_lines_to_read = 100\n for _ in range(max_num_lines_to_read):\n next_line = file_info.file_handle.readline()\n if next_line == \"\":\n break\n if next_line[-1] == \"\\n\":\n next_line = next_line[:-1]\n lines_to_publish.append(next_line)\n\n # Publish the lines if this is a worker process.\n filename = file_info.filename.split(\"/\")[-1]\n is_worker = (filename.startswith(\"worker\")\n and (filename.endswith(\"out\")\n or filename.endswith(\"err\")))\n\n if is_worker and file_info.file_position == 0:\n if (len(lines_to_publish) > 0 and\n lines_to_publish[0].startswith(\"Ray worker pid: \")):\n file_info.worker_pid = int(\n lines_to_publish[0].split(\" \")[-1])\n lines_to_publish = lines_to_publish[1:]\n\n # Record the current position in the file.\n file_info.file_position = file_info.file_handle.tell()\n\n if len(lines_to_publish) > 0 and is_worker:\n self.redis_client.publish(\n ray.gcs_utils.LOG_FILE_CHANNEL,\n json.dumps({\n \"ip\": self.ip,\n \"pid\": file_info.worker_pid,\n \"lines\": lines_to_publish\n }))\n anything_published = True\n\n return anything_published\n\n def run(self):\n \"\"\"Run the log monitor.\n\n This will query Redis once every second to check if there are new log\n files to monitor. 
It will also store those log files in Redis.\n \"\"\"\n while True:\n self.update_log_filenames()\n self.open_closed_files()\n anything_published = self.check_log_files_and_publish_updates()\n # If nothing was published, then wait a little bit before checking\n # for logs to avoid using too much CPU.\n if not anything_published:\n time.sleep(0.05)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=(\"Parse Redis server for the \"\n \"log monitor to connect \"\n \"to.\"))\n parser.add_argument(\n \"--redis-address\",\n required=True,\n type=str,\n help=\"The address to use for Redis.\")\n parser.add_argument(\n \"--redis-password\",\n required=False,\n type=str,\n default=None,\n help=\"the password to use for Redis\")\n parser.add_argument(\n \"--logging-level\",\n required=False,\n type=str,\n default=ray_constants.LOGGER_LEVEL,\n choices=ray_constants.LOGGER_LEVEL_CHOICES,\n help=ray_constants.LOGGER_LEVEL_HELP)\n parser.add_argument(\n \"--logging-format\",\n required=False,\n type=str,\n default=ray_constants.LOGGER_FORMAT,\n help=ray_constants.LOGGER_FORMAT_HELP)\n parser.add_argument(\n \"--logs-dir\",\n required=True,\n type=str,\n help=\"Specify the path of the temporary directory used by Ray \"\n \"processes.\")\n args = parser.parse_args()\n ray.utils.setup_logger(args.logging_level, args.logging_format)\n\n log_monitor = LogMonitor(\n args.logs_dir, args.redis_address, redis_password=args.redis_password)\n\n try:\n log_monitor.run()\n except Exception as e:\n # Something went wrong, so push an error to all drivers.\n redis_client = ray.services.create_redis_client(\n args.redis_address, password=args.redis_password)\n traceback_str = ray.utils.format_error_message(traceback.format_exc())\n message = (\"The log monitor on node {} failed with the following \"\n \"error:\\n{}\".format(os.uname()[1], traceback_str))\n ray.utils.push_error_to_driver_through_redis(\n redis_client, ray_constants.LOG_MONITOR_DIED_ERROR, message)\n raise e\n", "path": "python/ray/log_monitor.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport errno\nimport glob\nimport json\nimport logging\nimport os\nimport time\nimport traceback\n\nimport ray.ray_constants as ray_constants\nimport ray.services as services\nimport ray.utils\n\n# Logger for this module. It should be configured at the entry point\n# into the program using Ray. Ray provides a default configuration at\n# entry/init points.\nlogger = logging.getLogger(__name__)\n\n\nclass LogFileInfo(object):\n def __init__(self,\n filename=None,\n size_when_last_opened=None,\n file_position=None,\n file_handle=None):\n assert (filename is not None and size_when_last_opened is not None\n and file_position is not None)\n self.filename = filename\n self.size_when_last_opened = size_when_last_opened\n self.file_position = file_position\n self.file_handle = file_handle\n self.worker_pid = None\n\n\nclass LogMonitor(object):\n \"\"\"A monitor process for monitoring Ray log files.\n\n This class mantains a list of open files and a list of closed log files. We\n can't simply leave all files open because we'll run out of file\n descriptors.\n\n The \"run\" method of this class will cycle between doing several things:\n 1. First, it will check if any new files have appeared in the log\n directory. If so, they will be added to the list of closed files.\n 2. 
Then, if we are unable to open any new files, we will close all of the\n files.\n 3. Then, we will open as many closed files as we can that may have new\n lines (judged by an increase in file size since the last time the file\n was opened).\n 4. Then we will loop through the open files and see if there are any new\n lines in the file. If so, we will publish them to Redis.\n\n Attributes:\n host (str): The hostname of this machine. Used to improve the log\n messages published to Redis.\n logs_dir (str): The directory that the log files are in.\n redis_client: A client used to communicate with the Redis server.\n log_filenames (set): This is the set of filenames of all files in\n open_file_infos and closed_file_infos.\n open_file_infos (list[LogFileInfo]): Info for all of the open files.\n closed_file_infos (list[LogFileInfo]): Info for all of the closed\n files.\n can_open_more_files (bool): True if we can still open more files and\n false otherwise.\n \"\"\"\n\n def __init__(self, logs_dir, redis_address, redis_password=None):\n \"\"\"Initialize the log monitor object.\"\"\"\n self.ip = services.get_node_ip_address()\n self.logs_dir = logs_dir\n self.redis_client = ray.services.create_redis_client(\n redis_address, password=redis_password)\n self.log_filenames = set()\n self.open_file_infos = []\n self.closed_file_infos = []\n self.can_open_more_files = True\n\n def close_all_files(self):\n \"\"\"Close all open files (so that we can open more).\"\"\"\n while len(self.open_file_infos) > 0:\n file_info = self.open_file_infos.pop(0)\n file_info.file_handle.close()\n file_info.file_handle = None\n self.closed_file_infos.append(file_info)\n self.can_open_more_files = True\n\n def update_log_filenames(self):\n \"\"\"Update the list of log files to monitor.\"\"\"\n # we only monior worker log files\n log_file_paths = glob.glob(\"{}/worker*[.out|.err]\".format(\n self.logs_dir))\n for file_path in log_file_paths:\n if os.path.isfile(\n file_path) and file_path not in self.log_filenames:\n self.log_filenames.add(file_path)\n self.closed_file_infos.append(\n LogFileInfo(\n filename=file_path,\n size_when_last_opened=0,\n file_position=0,\n file_handle=None))\n log_filename = os.path.basename(file_path)\n logger.info(\"Beginning to track file {}\".format(log_filename))\n\n def open_closed_files(self):\n \"\"\"Open some closed files if they may have new lines.\n\n Opening more files may require us to close some of the already open\n files.\n \"\"\"\n if not self.can_open_more_files:\n # If we can't open any more files. 
Close all of the files.\n self.close_all_files()\n\n files_with_no_updates = []\n while len(self.closed_file_infos) > 0:\n if (len(self.open_file_infos) >=\n ray_constants.LOG_MONITOR_MAX_OPEN_FILES):\n self.can_open_more_files = False\n break\n\n file_info = self.closed_file_infos.pop(0)\n assert file_info.file_handle is None\n # Get the file size to see if it has gotten bigger since we last\n # opened it.\n try:\n file_size = os.path.getsize(file_info.filename)\n except (IOError, OSError) as e:\n # Catch \"file not found\" errors.\n if e.errno == errno.ENOENT:\n logger.warning(\"Warning: The file {} was not \"\n \"found.\".format(file_info.filename))\n self.log_filenames.remove(file_info.filename)\n continue\n raise e\n\n # If some new lines have been added to this file, try to reopen the\n # file.\n if file_size > file_info.size_when_last_opened:\n try:\n f = open(file_info.filename, \"r\")\n except (IOError, OSError) as e:\n if e.errno == errno.ENOENT:\n logger.warning(\"Warning: The file {} was not \"\n \"found.\".format(file_info.filename))\n self.log_filenames.remove(file_info.filename)\n continue\n else:\n raise e\n\n f.seek(file_info.file_position)\n file_info.filesize_when_last_opened = file_size\n file_info.file_handle = f\n self.open_file_infos.append(file_info)\n else:\n files_with_no_updates.append(file_info)\n\n # Add the files with no changes back to the list of closed files.\n self.closed_file_infos += files_with_no_updates\n\n def check_log_files_and_publish_updates(self):\n \"\"\"Get any changes to the log files and push updates to Redis.\n\n Returns:\n True if anything was published and false otherwise.\n \"\"\"\n anything_published = False\n for file_info in self.open_file_infos:\n assert not file_info.file_handle.closed\n\n lines_to_publish = []\n max_num_lines_to_read = 100\n for _ in range(max_num_lines_to_read):\n try:\n next_line = file_info.file_handle.readline()\n if next_line == \"\":\n break\n if next_line[-1] == \"\\n\":\n next_line = next_line[:-1]\n lines_to_publish.append(next_line)\n except Exception:\n logger.error(\"Error: Reading file: {}, position: {} \"\n \"failed.\".format(\n file_info.full_path,\n file_info.file_info.file_handle.tell()))\n raise\n\n if file_info.file_position == 0:\n if (len(lines_to_publish) > 0 and\n lines_to_publish[0].startswith(\"Ray worker pid: \")):\n file_info.worker_pid = int(\n lines_to_publish[0].split(\" \")[-1])\n lines_to_publish = lines_to_publish[1:]\n\n # Record the current position in the file.\n file_info.file_position = file_info.file_handle.tell()\n\n if len(lines_to_publish) > 0:\n self.redis_client.publish(\n ray.gcs_utils.LOG_FILE_CHANNEL,\n json.dumps({\n \"ip\": self.ip,\n \"pid\": file_info.worker_pid,\n \"lines\": lines_to_publish\n }))\n anything_published = True\n\n return anything_published\n\n def run(self):\n \"\"\"Run the log monitor.\n\n This will query Redis once every second to check if there are new log\n files to monitor. 
It will also store those log files in Redis.\n \"\"\"\n while True:\n self.update_log_filenames()\n self.open_closed_files()\n anything_published = self.check_log_files_and_publish_updates()\n # If nothing was published, then wait a little bit before checking\n # for logs to avoid using too much CPU.\n if not anything_published:\n time.sleep(0.05)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=(\"Parse Redis server for the \"\n \"log monitor to connect \"\n \"to.\"))\n parser.add_argument(\n \"--redis-address\",\n required=True,\n type=str,\n help=\"The address to use for Redis.\")\n parser.add_argument(\n \"--redis-password\",\n required=False,\n type=str,\n default=None,\n help=\"the password to use for Redis\")\n parser.add_argument(\n \"--logging-level\",\n required=False,\n type=str,\n default=ray_constants.LOGGER_LEVEL,\n choices=ray_constants.LOGGER_LEVEL_CHOICES,\n help=ray_constants.LOGGER_LEVEL_HELP)\n parser.add_argument(\n \"--logging-format\",\n required=False,\n type=str,\n default=ray_constants.LOGGER_FORMAT,\n help=ray_constants.LOGGER_FORMAT_HELP)\n parser.add_argument(\n \"--logs-dir\",\n required=True,\n type=str,\n help=\"Specify the path of the temporary directory used by Ray \"\n \"processes.\")\n args = parser.parse_args()\n ray.utils.setup_logger(args.logging_level, args.logging_format)\n\n log_monitor = LogMonitor(\n args.logs_dir, args.redis_address, redis_password=args.redis_password)\n\n try:\n log_monitor.run()\n except Exception as e:\n # Something went wrong, so push an error to all drivers.\n redis_client = ray.services.create_redis_client(\n args.redis_address, password=args.redis_password)\n traceback_str = ray.utils.format_error_message(traceback.format_exc())\n message = (\"The log monitor on node {} failed with the following \"\n \"error:\\n{}\".format(os.uname()[1], traceback_str))\n ray.utils.push_error_to_driver_through_redis(\n redis_client, ray_constants.LOG_MONITOR_DIED_ERROR, message)\n raise e\n", "path": "python/ray/log_monitor.py"}]} | 3,705 | 711 |
gh_patches_debug_9067 | rasdani/github-patches | git_diff | nautobot__nautobot-5190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for an OS Model to define target OS image per device
### Proposed Functionality
Support a model to define OSs that should be used by each device and ensure there is also a relationship between vendor and platform.
### Use Case
As a user, I should be able to define the intended OS or list of supported OSs per device. I should preferably also be able to run search queries within the UI using greater-than and less-than (<>) operators to verify whether my devices are running the intended OS versions.
### Database Changes
Yes, this will require one or more new database models, as well as updates to the Vendor and Platform models.
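For illustration only, a minimal sketch of what such a model could look like as a Django model follows. The names used here (`SoftwareVersion`, the `platform` and `device_types` relations) are assumptions made for this example, not the final Nautobot schema:

```python
from django.db import models


class SoftwareVersion(models.Model):
    """Hypothetical model tying a platform (and hence a vendor) to an OS release."""

    platform = models.ForeignKey(
        "dcim.Platform", on_delete=models.PROTECT, related_name="software_versions"
    )
    version = models.CharField(max_length=255)
    end_of_support = models.DateField(blank=True, null=True)
    # Device types this release is validated for.
    device_types = models.ManyToManyField(
        "dcim.DeviceType", related_name="software_versions", blank=True
    )

    class Meta:
        unique_together = [["platform", "version"]]

    def __str__(self):
        return f"{self.platform} {self.version}"
```

Note that the greater-than/less-than queries mentioned above would still need a version-parsing strategy (for example, a normalized sort key stored alongside `version`), since raw vendor version strings rarely compare correctly as plain text.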
### External Dependencies
Maybe - need to explore external library(ies) for parsing idiosyncratic vendor OS version strings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/dcim/api/urls.py`
Content:
```
1 from nautobot.core.api.routers import OrderedDefaultRouter
2
3 from . import views
4
5 router = OrderedDefaultRouter()
6 router.APIRootView = views.DCIMRootView
7
8 # Locations
9 router.register("location-types", views.LocationTypeViewSet)
10 router.register("locations", views.LocationViewSet)
11
12 # Racks
13 router.register("rack-groups", views.RackGroupViewSet)
14 router.register("racks", views.RackViewSet)
15 router.register("rack-reservations", views.RackReservationViewSet)
16
17 # Device types
18 router.register("hardware-families", views.HardwareFamilyViewSet)
19 router.register("manufacturers", views.ManufacturerViewSet)
20 router.register("device-types", views.DeviceTypeViewSet)
21
22 # Device type components
23 router.register("console-port-templates", views.ConsolePortTemplateViewSet)
24 router.register("console-server-port-templates", views.ConsoleServerPortTemplateViewSet)
25 router.register("power-port-templates", views.PowerPortTemplateViewSet)
26 router.register("power-outlet-templates", views.PowerOutletTemplateViewSet)
27 router.register("interface-templates", views.InterfaceTemplateViewSet)
28 router.register("front-port-templates", views.FrontPortTemplateViewSet)
29 router.register("rear-port-templates", views.RearPortTemplateViewSet)
30 router.register("device-bay-templates", views.DeviceBayTemplateViewSet)
31
32 # Devices
33 router.register("platforms", views.PlatformViewSet)
34 router.register("devices", views.DeviceViewSet)
35
36 # Device components
37 router.register("console-ports", views.ConsolePortViewSet)
38 router.register("console-server-ports", views.ConsoleServerPortViewSet)
39 router.register("power-ports", views.PowerPortViewSet)
40 router.register("power-outlets", views.PowerOutletViewSet)
41 router.register("interfaces", views.InterfaceViewSet)
42 router.register("front-ports", views.FrontPortViewSet)
43 router.register("rear-ports", views.RearPortViewSet)
44 router.register("device-bays", views.DeviceBayViewSet)
45 router.register("inventory-items", views.InventoryItemViewSet)
46
47 # Connections
48 router.register("console-connections", views.ConsoleConnectionViewSet, basename="consoleconnections")
49 router.register("power-connections", views.PowerConnectionViewSet, basename="powerconnections")
50 router.register(
51 "interface-connections",
52 views.InterfaceConnectionViewSet,
53 basename="interfaceconnections",
54 )
55
56 # Cables
57 router.register("cables", views.CableViewSet)
58
59 # Interface Redundancy Group
60 router.register("interface-redundancy-groups", views.InterfaceRedundancyGroupViewSet)
61 router.register("interface-redundancy-group-associations", views.InterfaceRedundancyGroupAssociationViewSet)
62
63 # Virtual chassis
64 router.register("virtual-chassis", views.VirtualChassisViewSet)
65
66 # Power
67 router.register("power-panels", views.PowerPanelViewSet)
68 router.register("power-feeds", views.PowerFeedViewSet)
69
70 # Device Redundancy Group
71 router.register("device-redundancy-groups", views.DeviceRedundancyGroupViewSet)
72
73 # Software images
74 router.register("software-images", views.SoftwareImageViewSet)
75 router.register("software-versions", views.SoftwareVersionViewSet)
76 router.register("device-type-to-software-image", views.DeviceTypeToSoftwareImageViewSet)
77
78 # Miscellaneous
79 router.register("connected-device", views.ConnectedDeviceViewSet, basename="connected-device")
80
81 app_name = "dcim-api"
82 urlpatterns = router.urls
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/dcim/api/urls.py b/nautobot/dcim/api/urls.py
--- a/nautobot/dcim/api/urls.py
+++ b/nautobot/dcim/api/urls.py
@@ -73,7 +73,7 @@
# Software images
router.register("software-images", views.SoftwareImageViewSet)
router.register("software-versions", views.SoftwareVersionViewSet)
-router.register("device-type-to-software-image", views.DeviceTypeToSoftwareImageViewSet)
+router.register("device-types-to-software-images", views.DeviceTypeToSoftwareImageViewSet)
# Miscellaneous
router.register("connected-device", views.ConnectedDeviceViewSet, basename="connected-device")
| {"golden_diff": "diff --git a/nautobot/dcim/api/urls.py b/nautobot/dcim/api/urls.py\n--- a/nautobot/dcim/api/urls.py\n+++ b/nautobot/dcim/api/urls.py\n@@ -73,7 +73,7 @@\n # Software images\n router.register(\"software-images\", views.SoftwareImageViewSet)\n router.register(\"software-versions\", views.SoftwareVersionViewSet)\n-router.register(\"device-type-to-software-image\", views.DeviceTypeToSoftwareImageViewSet)\n+router.register(\"device-types-to-software-images\", views.DeviceTypeToSoftwareImageViewSet)\n \n # Miscellaneous\n router.register(\"connected-device\", views.ConnectedDeviceViewSet, basename=\"connected-device\")\n", "issue": "Add support for an OS Model to define target OS image per device\n### Proposed Functionality\r\nSupport a model to define OSs that should be used by each device and ensure there is also a relationship between vendor and platform.\r\n\r\n### Use Case\r\nAs a user, I should be able to define the intended OS or list of supported OSs per device. I should preferably be able to run search queries within the UI using greater than and less than (<>) operators to verify if my devices.\r\n\r\n### Database Changes\r\nYes, will require a new database model(s), as well as updates to Vendor and Platform models\r\n\r\n### External Dependencies\r\nMaybe - need to explore external library(ies) for parsing idiosyncratic vendor OS version strings.\r\n\n", "before_files": [{"content": "from nautobot.core.api.routers import OrderedDefaultRouter\n\nfrom . import views\n\nrouter = OrderedDefaultRouter()\nrouter.APIRootView = views.DCIMRootView\n\n# Locations\nrouter.register(\"location-types\", views.LocationTypeViewSet)\nrouter.register(\"locations\", views.LocationViewSet)\n\n# Racks\nrouter.register(\"rack-groups\", views.RackGroupViewSet)\nrouter.register(\"racks\", views.RackViewSet)\nrouter.register(\"rack-reservations\", views.RackReservationViewSet)\n\n# Device types\nrouter.register(\"hardware-families\", views.HardwareFamilyViewSet)\nrouter.register(\"manufacturers\", views.ManufacturerViewSet)\nrouter.register(\"device-types\", views.DeviceTypeViewSet)\n\n# Device type components\nrouter.register(\"console-port-templates\", views.ConsolePortTemplateViewSet)\nrouter.register(\"console-server-port-templates\", views.ConsoleServerPortTemplateViewSet)\nrouter.register(\"power-port-templates\", views.PowerPortTemplateViewSet)\nrouter.register(\"power-outlet-templates\", views.PowerOutletTemplateViewSet)\nrouter.register(\"interface-templates\", views.InterfaceTemplateViewSet)\nrouter.register(\"front-port-templates\", views.FrontPortTemplateViewSet)\nrouter.register(\"rear-port-templates\", views.RearPortTemplateViewSet)\nrouter.register(\"device-bay-templates\", views.DeviceBayTemplateViewSet)\n\n# Devices\nrouter.register(\"platforms\", views.PlatformViewSet)\nrouter.register(\"devices\", views.DeviceViewSet)\n\n# Device components\nrouter.register(\"console-ports\", views.ConsolePortViewSet)\nrouter.register(\"console-server-ports\", views.ConsoleServerPortViewSet)\nrouter.register(\"power-ports\", views.PowerPortViewSet)\nrouter.register(\"power-outlets\", views.PowerOutletViewSet)\nrouter.register(\"interfaces\", views.InterfaceViewSet)\nrouter.register(\"front-ports\", views.FrontPortViewSet)\nrouter.register(\"rear-ports\", views.RearPortViewSet)\nrouter.register(\"device-bays\", views.DeviceBayViewSet)\nrouter.register(\"inventory-items\", views.InventoryItemViewSet)\n\n# Connections\nrouter.register(\"console-connections\", views.ConsoleConnectionViewSet, 
basename=\"consoleconnections\")\nrouter.register(\"power-connections\", views.PowerConnectionViewSet, basename=\"powerconnections\")\nrouter.register(\n \"interface-connections\",\n views.InterfaceConnectionViewSet,\n basename=\"interfaceconnections\",\n)\n\n# Cables\nrouter.register(\"cables\", views.CableViewSet)\n\n# Interface Redundancy Group\nrouter.register(\"interface-redundancy-groups\", views.InterfaceRedundancyGroupViewSet)\nrouter.register(\"interface-redundancy-group-associations\", views.InterfaceRedundancyGroupAssociationViewSet)\n\n# Virtual chassis\nrouter.register(\"virtual-chassis\", views.VirtualChassisViewSet)\n\n# Power\nrouter.register(\"power-panels\", views.PowerPanelViewSet)\nrouter.register(\"power-feeds\", views.PowerFeedViewSet)\n\n# Device Redundancy Group\nrouter.register(\"device-redundancy-groups\", views.DeviceRedundancyGroupViewSet)\n\n# Software images\nrouter.register(\"software-images\", views.SoftwareImageViewSet)\nrouter.register(\"software-versions\", views.SoftwareVersionViewSet)\nrouter.register(\"device-type-to-software-image\", views.DeviceTypeToSoftwareImageViewSet)\n\n# Miscellaneous\nrouter.register(\"connected-device\", views.ConnectedDeviceViewSet, basename=\"connected-device\")\n\napp_name = \"dcim-api\"\nurlpatterns = router.urls\n", "path": "nautobot/dcim/api/urls.py"}], "after_files": [{"content": "from nautobot.core.api.routers import OrderedDefaultRouter\n\nfrom . import views\n\nrouter = OrderedDefaultRouter()\nrouter.APIRootView = views.DCIMRootView\n\n# Locations\nrouter.register(\"location-types\", views.LocationTypeViewSet)\nrouter.register(\"locations\", views.LocationViewSet)\n\n# Racks\nrouter.register(\"rack-groups\", views.RackGroupViewSet)\nrouter.register(\"racks\", views.RackViewSet)\nrouter.register(\"rack-reservations\", views.RackReservationViewSet)\n\n# Device types\nrouter.register(\"hardware-families\", views.HardwareFamilyViewSet)\nrouter.register(\"manufacturers\", views.ManufacturerViewSet)\nrouter.register(\"device-types\", views.DeviceTypeViewSet)\n\n# Device type components\nrouter.register(\"console-port-templates\", views.ConsolePortTemplateViewSet)\nrouter.register(\"console-server-port-templates\", views.ConsoleServerPortTemplateViewSet)\nrouter.register(\"power-port-templates\", views.PowerPortTemplateViewSet)\nrouter.register(\"power-outlet-templates\", views.PowerOutletTemplateViewSet)\nrouter.register(\"interface-templates\", views.InterfaceTemplateViewSet)\nrouter.register(\"front-port-templates\", views.FrontPortTemplateViewSet)\nrouter.register(\"rear-port-templates\", views.RearPortTemplateViewSet)\nrouter.register(\"device-bay-templates\", views.DeviceBayTemplateViewSet)\n\n# Devices\nrouter.register(\"platforms\", views.PlatformViewSet)\nrouter.register(\"devices\", views.DeviceViewSet)\n\n# Device components\nrouter.register(\"console-ports\", views.ConsolePortViewSet)\nrouter.register(\"console-server-ports\", views.ConsoleServerPortViewSet)\nrouter.register(\"power-ports\", views.PowerPortViewSet)\nrouter.register(\"power-outlets\", views.PowerOutletViewSet)\nrouter.register(\"interfaces\", views.InterfaceViewSet)\nrouter.register(\"front-ports\", views.FrontPortViewSet)\nrouter.register(\"rear-ports\", views.RearPortViewSet)\nrouter.register(\"device-bays\", views.DeviceBayViewSet)\nrouter.register(\"inventory-items\", views.InventoryItemViewSet)\n\n# Connections\nrouter.register(\"console-connections\", views.ConsoleConnectionViewSet, 
basename=\"consoleconnections\")\nrouter.register(\"power-connections\", views.PowerConnectionViewSet, basename=\"powerconnections\")\nrouter.register(\n \"interface-connections\",\n views.InterfaceConnectionViewSet,\n basename=\"interfaceconnections\",\n)\n\n# Cables\nrouter.register(\"cables\", views.CableViewSet)\n\n# Interface Redundancy Group\nrouter.register(\"interface-redundancy-groups\", views.InterfaceRedundancyGroupViewSet)\nrouter.register(\"interface-redundancy-group-associations\", views.InterfaceRedundancyGroupAssociationViewSet)\n\n# Virtual chassis\nrouter.register(\"virtual-chassis\", views.VirtualChassisViewSet)\n\n# Power\nrouter.register(\"power-panels\", views.PowerPanelViewSet)\nrouter.register(\"power-feeds\", views.PowerFeedViewSet)\n\n# Device Redundancy Group\nrouter.register(\"device-redundancy-groups\", views.DeviceRedundancyGroupViewSet)\n\n# Software images\nrouter.register(\"software-images\", views.SoftwareImageViewSet)\nrouter.register(\"software-versions\", views.SoftwareVersionViewSet)\nrouter.register(\"device-types-to-software-images\", views.DeviceTypeToSoftwareImageViewSet)\n\n# Miscellaneous\nrouter.register(\"connected-device\", views.ConnectedDeviceViewSet, basename=\"connected-device\")\n\napp_name = \"dcim-api\"\nurlpatterns = router.urls\n", "path": "nautobot/dcim/api/urls.py"}]} | 1,250 | 147 |
gh_patches_debug_31200 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Convex-hull-dependent features can trip up regionprops calculation
## Description
For some degenerate objects (e.g. objects that are intrinsically one dimension lower than their embedding space, or that have too few voxels), the convex hull calculation throws exceptions.
This has been reported by Yi (@sunyi000 on GitHub) here:
https://forum.image.sc/t/scikit-image-regionprops-and-regionprops-table-questions/52021/2
for intrinsically 1D objects. In this case scipy throws the following exception. @jni has also been part of that image.sc thread.
```
scipy.spatial.qhull.QhullError: QH6013 qhull input error: input is less than 3-dimensional since all points have the same x coordinate 0
```
Other objects that can cause such exceptions are those with too few points, see for example:
https://github.com/scikit-image/scikit-image/issues/4865#issuecomment-711069382
## Way to reproduce
Still working on a minimal reproducible example
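As an illustrative stand-in until a real example is available, the following sketch (the coordinates are an assumption, not the reporter's data) triggers the same class of error with degenerate 3D points:

```python
import numpy as np
from scipy.spatial import ConvexHull

# Four coplanar 3D points: every point has x == 0, so the point cloud is
# effectively 2D and Qhull cannot build a 3D hull.
points = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 1]])
ConvexHull(points)  # raises QhullError: "input is less than 3-dimensional ..."
```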
## Suggested way to fix
IMO,
rather than having `measure.regionprops` fail for such objects, `QhullError`s should be caught and the affected label should have a `NaN` in the relevant features. A warning should also be issued if this occurs.
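A minimal sketch of that idea follows (illustrative only; the helper name and exactly where the guard would live inside `regionprops` are assumptions):

```python
import warnings

import numpy as np
from scipy.spatial import ConvexHull

try:
    # Public location in newer SciPy releases.
    from scipy.spatial import QhullError
except ImportError:
    # Older SciPy releases only expose it in the private module.
    from scipy.spatial.qhull import QhullError


def safe_convex_area(coords):
    """Return the convex hull volume for one label, or NaN if the geometry is degenerate."""
    try:
        return ConvexHull(coords).volume
    except QhullError:
        warnings.warn(
            "Could not compute a convex hull for this label (degenerate geometry); "
            "returning NaN for hull-dependent properties.",
            UserWarning,
        )
        return np.nan
```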
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/morphology/convex_hull.py`
Content:
```
1 """Convex Hull."""
2 from itertools import product
3 import numpy as np
4 from scipy.spatial import ConvexHull
5 from ..measure.pnpoly import grid_points_in_poly
6 from ._convex_hull import possible_hull
7 from ..measure._label import label
8 from ..util import unique_rows
9 from .._shared.utils import warn
10
11 __all__ = ['convex_hull_image', 'convex_hull_object']
12
13
14 def _offsets_diamond(ndim):
15 offsets = np.zeros((2 * ndim, ndim))
16 for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))):
17 offsets[vertex, axis] = offset
18 return offsets
19
20
21 def _check_coords_in_hull(gridcoords, hull_equations, tolerance):
22 r"""Checks all the coordinates for inclusiveness in the convex hull.
23
24 Parameters
25 ----------
26 gridcoords : (M, N) ndarray
27 Coordinates of ``N`` points in ``M`` dimensions.
28 hull_equations : (M, N) ndarray
29 Hyperplane equations of the facets of the convex hull.
30 tolerance : float
31 Tolerance when determining whether a point is inside the hull. Due
32 to numerical floating point errors, a tolerance of 0 can result in
33 some points erroneously being classified as being outside the hull.
34
35 Returns
36 -------
37 coords_in_hull : ndarray of bool
38 Binary 1D ndarray representing points in n-dimensional space
39 with value ``True`` set for points inside the convex hull.
40
41 Notes
42 -----
43 Checking the inclusiveness of coordinates in a convex hull requires
44 intermediate calculations of dot products which are memory-intensive.
45 Thus, the convex hull equations are checked individually with all
46 coordinates to keep within the memory limit.
47
48 References
49 ----------
50 .. [1] https://github.com/scikit-image/scikit-image/issues/5019
51
52 """
53 ndim, n_coords = gridcoords.shape
54 n_hull_equations = hull_equations.shape[0]
55 coords_in_hull = np.ones(n_coords, dtype=bool)
56
57 # Pre-allocate arrays to cache intermediate results for reducing overheads
58 dot_array = np.empty(n_coords, dtype=np.float64)
59 test_ineq_temp = np.empty(n_coords, dtype=np.float64)
60 coords_single_ineq = np.empty(n_coords, dtype=bool)
61
62 # A point is in the hull if it satisfies all of the hull's inequalities
63 for idx in range(n_hull_equations):
64 # Tests a hyperplane equation on all coordinates of volume
65 np.dot(hull_equations[idx, :ndim], gridcoords, out=dot_array)
66 np.add(dot_array, hull_equations[idx, ndim:], out=test_ineq_temp)
67 np.less(test_ineq_temp, tolerance, out=coords_single_ineq)
68 coords_in_hull *= coords_single_ineq
69
70 return coords_in_hull
71
72
73 def convex_hull_image(image, offset_coordinates=True, tolerance=1e-10):
74 """Compute the convex hull image of a binary image.
75
76 The convex hull is the set of pixels included in the smallest convex
77 polygon that surround all white pixels in the input image.
78
79 Parameters
80 ----------
81 image : array
82 Binary input image. This array is cast to bool before processing.
83 offset_coordinates : bool, optional
84 If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented
85 by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds
86 some "extent" to a pixel when computing the hull.
87 tolerance : float, optional
88 Tolerance when determining whether a point is inside the hull. Due
89 to numerical floating point errors, a tolerance of 0 can result in
90 some points erroneously being classified as being outside the hull.
91
92 Returns
93 -------
94 hull : (M, N) array of bool
95 Binary image with pixels in convex hull set to True.
96
97 References
98 ----------
99 .. [1] https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
100
101 """
102 ndim = image.ndim
103 if np.count_nonzero(image) == 0:
104 warn("Input image is entirely zero, no valid convex hull. "
105 "Returning empty image", UserWarning)
106 return np.zeros(image.shape, dtype=bool)
107 # In 2D, we do an optimisation by choosing only pixels that are
108 # the starting or ending pixel of a row or column. This vastly
109 # limits the number of coordinates to examine for the virtual hull.
110 if ndim == 2:
111 coords = possible_hull(np.ascontiguousarray(image, dtype=np.uint8))
112 else:
113 coords = np.transpose(np.nonzero(image))
114 if offset_coordinates:
115 # when offsetting, we multiply number of vertices by 2 * ndim.
116 # therefore, we reduce the number of coordinates by using a
117 # convex hull on the original set, before offsetting.
118 hull0 = ConvexHull(coords)
119 coords = hull0.points[hull0.vertices]
120
121 # Add a vertex for the middle of each pixel edge
122 if offset_coordinates:
123 offsets = _offsets_diamond(image.ndim)
124 coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim)
125
126 # repeated coordinates can *sometimes* cause problems in
127 # scipy.spatial.ConvexHull, so we remove them.
128 coords = unique_rows(coords)
129
130 # Find the convex hull
131 hull = ConvexHull(coords)
132 vertices = hull.points[hull.vertices]
133
134 # If 2D, use fast Cython function to locate convex hull pixels
135 if ndim == 2:
136 mask = grid_points_in_poly(image.shape, vertices)
137 else:
138 gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))],
139 (ndim, -1))
140
141 coords_in_hull = _check_coords_in_hull(gridcoords,
142 hull.equations, tolerance)
143 mask = np.reshape(coords_in_hull, image.shape)
144
145 return mask
146
147
148 def convex_hull_object(image, *, connectivity=2):
149 r"""Compute the convex hull image of individual objects in a binary image.
150
151 The convex hull is the set of pixels included in the smallest convex
152 polygon that surround all white pixels in the input image.
153
154 Parameters
155 ----------
156 image : (M, N) ndarray
157 Binary input image.
158 connectivity : {1, 2}, int, optional
159 Determines the neighbors of each pixel. Adjacent elements
160 within a squared distance of ``connectivity`` from pixel center
161 are considered neighbors.::
162
163 1-connectivity 2-connectivity
164 [ ] [ ] [ ] [ ]
165 | \ | /
166 [ ]--[x]--[ ] [ ]--[x]--[ ]
167 | / | \
168 [ ] [ ] [ ] [ ]
169
170 Returns
171 -------
172 hull : ndarray of bool
173 Binary image with pixels inside convex hull set to ``True``.
174
175 Notes
176 -----
177 This function uses ``skimage.morphology.label`` to define unique objects,
178 finds the convex hull of each using ``convex_hull_image``, and combines
179 these regions with logical OR. Be aware the convex hulls of unconnected
180 objects may overlap in the result. If this is suspected, consider using
181 convex_hull_image separately on each object or adjust ``connectivity``.
182 """
183 if image.ndim > 2:
184 raise ValueError("Input must be a 2D image")
185
186 if connectivity not in (1, 2):
187 raise ValueError('`connectivity` must be either 1 or 2.')
188
189 labeled_im = label(image, connectivity=connectivity, background=0)
190 convex_obj = np.zeros(image.shape, dtype=bool)
191 convex_img = np.zeros(image.shape, dtype=bool)
192
193 for i in range(1, labeled_im.max() + 1):
194 convex_obj = convex_hull_image(labeled_im == i)
195 convex_img = np.logical_or(convex_img, convex_obj)
196
197 return convex_img
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/morphology/convex_hull.py b/skimage/morphology/convex_hull.py
--- a/skimage/morphology/convex_hull.py
+++ b/skimage/morphology/convex_hull.py
@@ -8,6 +8,13 @@
from ..util import unique_rows
from .._shared.utils import warn
+try:
+ # Should be public API of scipy spatial once #15003 is released
+ # see https://github.com/scipy/scipy/pull/15003
+ from scipy.spatial import QhullError
+except ImportError:
+ from scipy.spatial.qhull import QhullError
+
__all__ = ['convex_hull_image', 'convex_hull_object']
@@ -115,7 +122,13 @@
# when offsetting, we multiply number of vertices by 2 * ndim.
# therefore, we reduce the number of coordinates by using a
# convex hull on the original set, before offsetting.
- hull0 = ConvexHull(coords)
+ try:
+ hull0 = ConvexHull(coords)
+ except QhullError as err:
+ warn(f"Failed to get convex hull image. "
+ f"Returning empty image, see error message below:\n"
+ f"{err}")
+ return np.zeros(image.shape, dtype=bool)
coords = hull0.points[hull0.vertices]
# Add a vertex for the middle of each pixel edge
@@ -128,7 +141,13 @@
coords = unique_rows(coords)
# Find the convex hull
- hull = ConvexHull(coords)
+ try:
+ hull = ConvexHull(coords)
+ except QhullError as err:
+ warn(f"Failed to get convex hull image. "
+ f"Returning empty image, see error message below:\n"
+ f"{err}")
+ return np.zeros(image.shape, dtype=bool)
vertices = hull.points[hull.vertices]
# If 2D, use fast Cython function to locate convex hull pixels
| {"golden_diff": "diff --git a/skimage/morphology/convex_hull.py b/skimage/morphology/convex_hull.py\n--- a/skimage/morphology/convex_hull.py\n+++ b/skimage/morphology/convex_hull.py\n@@ -8,6 +8,13 @@\n from ..util import unique_rows\n from .._shared.utils import warn\n \n+try:\n+ # Should be public API of scipy spatial once #15003 is released\n+ # see https://github.com/scipy/scipy/pull/15003\n+ from scipy.spatial import QhullError\n+except ImportError:\n+ from scipy.spatial.qhull import QhullError\n+\n __all__ = ['convex_hull_image', 'convex_hull_object']\n \n \n@@ -115,7 +122,13 @@\n # when offsetting, we multiply number of vertices by 2 * ndim.\n # therefore, we reduce the number of coordinates by using a\n # convex hull on the original set, before offsetting.\n- hull0 = ConvexHull(coords)\n+ try:\n+ hull0 = ConvexHull(coords)\n+ except QhullError as err:\n+ warn(f\"Failed to get convex hull image. \"\n+ f\"Returning empty image, see error message below:\\n\"\n+ f\"{err}\")\n+ return np.zeros(image.shape, dtype=bool)\n coords = hull0.points[hull0.vertices]\n \n # Add a vertex for the middle of each pixel edge\n@@ -128,7 +141,13 @@\n coords = unique_rows(coords)\n \n # Find the convex hull\n- hull = ConvexHull(coords)\n+ try:\n+ hull = ConvexHull(coords)\n+ except QhullError as err:\n+ warn(f\"Failed to get convex hull image. \"\n+ f\"Returning empty image, see error message below:\\n\"\n+ f\"{err}\")\n+ return np.zeros(image.shape, dtype=bool)\n vertices = hull.points[hull.vertices]\n \n # If 2D, use fast Cython function to locate convex hull pixels\n", "issue": "Convex Hull - dependent features can trip up regionprops calculation\n## Description\r\n\r\nFor some degenerate objects (e.g. intrinisically one less dimension, too few voxels), convex hull calculation throws exceptions.\r\nThis has been reported by Yi (@sunyi000 on Github) here \r\n https://forum.image.sc/t/scikit-image-regionprops-and-regionprops-table-questions/52021/2\r\nfor intrinsically 1d objects. In this scase scipy throws the following exception. @jni has also been part of that image.sc thread.\r\n\r\n```\r\nscipy.spatial.qhull.QhullError: QH6013 qhull input error: input is less than 3-dimensional since all points have the same x coordinate 0\r\n```\r\nOther objects that can cause such exceptions are those with too few points, see for example:\r\nhttps://github.com/scikit-image/scikit-image/issues/4865#issuecomment-711069382\r\n\r\n## Way to reproduce\r\nStill working on a minimal reproducible example\r\n\r\n## Suggested way to fix\r\n\r\nIMO,\r\nrather than having `measure.regionprops` fail for such object, `QHullErrors` should be caught and the affected label should have a `NaN` in the relevant features. 
A warning should also be issued if this occurs.\n", "before_files": [{"content": "\"\"\"Convex Hull.\"\"\"\nfrom itertools import product\nimport numpy as np\nfrom scipy.spatial import ConvexHull\nfrom ..measure.pnpoly import grid_points_in_poly\nfrom ._convex_hull import possible_hull\nfrom ..measure._label import label\nfrom ..util import unique_rows\nfrom .._shared.utils import warn\n\n__all__ = ['convex_hull_image', 'convex_hull_object']\n\n\ndef _offsets_diamond(ndim):\n offsets = np.zeros((2 * ndim, ndim))\n for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))):\n offsets[vertex, axis] = offset\n return offsets\n\n\ndef _check_coords_in_hull(gridcoords, hull_equations, tolerance):\n r\"\"\"Checks all the coordinates for inclusiveness in the convex hull.\n\n Parameters\n ----------\n gridcoords : (M, N) ndarray\n Coordinates of ``N`` points in ``M`` dimensions.\n hull_equations : (M, N) ndarray\n Hyperplane equations of the facets of the convex hull.\n tolerance : float\n Tolerance when determining whether a point is inside the hull. Due\n to numerical floating point errors, a tolerance of 0 can result in\n some points erroneously being classified as being outside the hull.\n\n Returns\n -------\n coords_in_hull : ndarray of bool\n Binary 1D ndarray representing points in n-dimensional space\n with value ``True`` set for points inside the convex hull.\n\n Notes\n -----\n Checking the inclusiveness of coordinates in a convex hull requires\n intermediate calculations of dot products which are memory-intensive.\n Thus, the convex hull equations are checked individually with all\n coordinates to keep within the memory limit.\n\n References\n ----------\n .. [1] https://github.com/scikit-image/scikit-image/issues/5019\n\n \"\"\"\n ndim, n_coords = gridcoords.shape\n n_hull_equations = hull_equations.shape[0]\n coords_in_hull = np.ones(n_coords, dtype=bool)\n\n # Pre-allocate arrays to cache intermediate results for reducing overheads\n dot_array = np.empty(n_coords, dtype=np.float64)\n test_ineq_temp = np.empty(n_coords, dtype=np.float64)\n coords_single_ineq = np.empty(n_coords, dtype=bool)\n\n # A point is in the hull if it satisfies all of the hull's inequalities\n for idx in range(n_hull_equations):\n # Tests a hyperplane equation on all coordinates of volume\n np.dot(hull_equations[idx, :ndim], gridcoords, out=dot_array)\n np.add(dot_array, hull_equations[idx, ndim:], out=test_ineq_temp)\n np.less(test_ineq_temp, tolerance, out=coords_single_ineq)\n coords_in_hull *= coords_single_ineq\n\n return coords_in_hull\n\n\ndef convex_hull_image(image, offset_coordinates=True, tolerance=1e-10):\n \"\"\"Compute the convex hull image of a binary image.\n\n The convex hull is the set of pixels included in the smallest convex\n polygon that surround all white pixels in the input image.\n\n Parameters\n ----------\n image : array\n Binary input image. This array is cast to bool before processing.\n offset_coordinates : bool, optional\n If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented\n by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds\n some \"extent\" to a pixel when computing the hull.\n tolerance : float, optional\n Tolerance when determining whether a point is inside the hull. 
Due\n to numerical floating point errors, a tolerance of 0 can result in\n some points erroneously being classified as being outside the hull.\n\n Returns\n -------\n hull : (M, N) array of bool\n Binary image with pixels in convex hull set to True.\n\n References\n ----------\n .. [1] https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/\n\n \"\"\"\n ndim = image.ndim\n if np.count_nonzero(image) == 0:\n warn(\"Input image is entirely zero, no valid convex hull. \"\n \"Returning empty image\", UserWarning)\n return np.zeros(image.shape, dtype=bool)\n # In 2D, we do an optimisation by choosing only pixels that are\n # the starting or ending pixel of a row or column. This vastly\n # limits the number of coordinates to examine for the virtual hull.\n if ndim == 2:\n coords = possible_hull(np.ascontiguousarray(image, dtype=np.uint8))\n else:\n coords = np.transpose(np.nonzero(image))\n if offset_coordinates:\n # when offsetting, we multiply number of vertices by 2 * ndim.\n # therefore, we reduce the number of coordinates by using a\n # convex hull on the original set, before offsetting.\n hull0 = ConvexHull(coords)\n coords = hull0.points[hull0.vertices]\n\n # Add a vertex for the middle of each pixel edge\n if offset_coordinates:\n offsets = _offsets_diamond(image.ndim)\n coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim)\n\n # repeated coordinates can *sometimes* cause problems in\n # scipy.spatial.ConvexHull, so we remove them.\n coords = unique_rows(coords)\n\n # Find the convex hull\n hull = ConvexHull(coords)\n vertices = hull.points[hull.vertices]\n\n # If 2D, use fast Cython function to locate convex hull pixels\n if ndim == 2:\n mask = grid_points_in_poly(image.shape, vertices)\n else:\n gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))],\n (ndim, -1))\n\n coords_in_hull = _check_coords_in_hull(gridcoords,\n hull.equations, tolerance)\n mask = np.reshape(coords_in_hull, image.shape)\n\n return mask\n\n\ndef convex_hull_object(image, *, connectivity=2):\n r\"\"\"Compute the convex hull image of individual objects in a binary image.\n\n The convex hull is the set of pixels included in the smallest convex\n polygon that surround all white pixels in the input image.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Binary input image.\n connectivity : {1, 2}, int, optional\n Determines the neighbors of each pixel. Adjacent elements\n within a squared distance of ``connectivity`` from pixel center\n are considered neighbors.::\n\n 1-connectivity 2-connectivity\n [ ] [ ] [ ] [ ]\n | \\ | /\n [ ]--[x]--[ ] [ ]--[x]--[ ]\n | / | \\\n [ ] [ ] [ ] [ ]\n\n Returns\n -------\n hull : ndarray of bool\n Binary image with pixels inside convex hull set to ``True``.\n\n Notes\n -----\n This function uses ``skimage.morphology.label`` to define unique objects,\n finds the convex hull of each using ``convex_hull_image``, and combines\n these regions with logical OR. Be aware the convex hulls of unconnected\n objects may overlap in the result. 
If this is suspected, consider using\n convex_hull_image separately on each object or adjust ``connectivity``.\n \"\"\"\n if image.ndim > 2:\n raise ValueError(\"Input must be a 2D image\")\n\n if connectivity not in (1, 2):\n raise ValueError('`connectivity` must be either 1 or 2.')\n\n labeled_im = label(image, connectivity=connectivity, background=0)\n convex_obj = np.zeros(image.shape, dtype=bool)\n convex_img = np.zeros(image.shape, dtype=bool)\n\n for i in range(1, labeled_im.max() + 1):\n convex_obj = convex_hull_image(labeled_im == i)\n convex_img = np.logical_or(convex_img, convex_obj)\n\n return convex_img\n", "path": "skimage/morphology/convex_hull.py"}], "after_files": [{"content": "\"\"\"Convex Hull.\"\"\"\nfrom itertools import product\nimport numpy as np\nfrom scipy.spatial import ConvexHull\nfrom ..measure.pnpoly import grid_points_in_poly\nfrom ._convex_hull import possible_hull\nfrom ..measure._label import label\nfrom ..util import unique_rows\nfrom .._shared.utils import warn\n\ntry:\n # Should be public API of scipy spatial once #15003 is released\n # see https://github.com/scipy/scipy/pull/15003\n from scipy.spatial import QhullError\nexcept ImportError:\n from scipy.spatial.qhull import QhullError\n\n__all__ = ['convex_hull_image', 'convex_hull_object']\n\n\ndef _offsets_diamond(ndim):\n offsets = np.zeros((2 * ndim, ndim))\n for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))):\n offsets[vertex, axis] = offset\n return offsets\n\n\ndef _check_coords_in_hull(gridcoords, hull_equations, tolerance):\n r\"\"\"Checks all the coordinates for inclusiveness in the convex hull.\n\n Parameters\n ----------\n gridcoords : (M, N) ndarray\n Coordinates of ``N`` points in ``M`` dimensions.\n hull_equations : (M, N) ndarray\n Hyperplane equations of the facets of the convex hull.\n tolerance : float\n Tolerance when determining whether a point is inside the hull. Due\n to numerical floating point errors, a tolerance of 0 can result in\n some points erroneously being classified as being outside the hull.\n\n Returns\n -------\n coords_in_hull : ndarray of bool\n Binary 1D ndarray representing points in n-dimensional space\n with value ``True`` set for points inside the convex hull.\n\n Notes\n -----\n Checking the inclusiveness of coordinates in a convex hull requires\n intermediate calculations of dot products which are memory-intensive.\n Thus, the convex hull equations are checked individually with all\n coordinates to keep within the memory limit.\n\n References\n ----------\n .. 
[1] https://github.com/scikit-image/scikit-image/issues/5019\n\n \"\"\"\n ndim, n_coords = gridcoords.shape\n n_hull_equations = hull_equations.shape[0]\n coords_in_hull = np.ones(n_coords, dtype=bool)\n\n # Pre-allocate arrays to cache intermediate results for reducing overheads\n dot_array = np.empty(n_coords, dtype=np.float64)\n test_ineq_temp = np.empty(n_coords, dtype=np.float64)\n coords_single_ineq = np.empty(n_coords, dtype=bool)\n\n # A point is in the hull if it satisfies all of the hull's inequalities\n for idx in range(n_hull_equations):\n # Tests a hyperplane equation on all coordinates of volume\n np.dot(hull_equations[idx, :ndim], gridcoords, out=dot_array)\n np.add(dot_array, hull_equations[idx, ndim:], out=test_ineq_temp)\n np.less(test_ineq_temp, tolerance, out=coords_single_ineq)\n coords_in_hull *= coords_single_ineq\n\n return coords_in_hull\n\n\ndef convex_hull_image(image, offset_coordinates=True, tolerance=1e-10):\n \"\"\"Compute the convex hull image of a binary image.\n\n The convex hull is the set of pixels included in the smallest convex\n polygon that surround all white pixels in the input image.\n\n Parameters\n ----------\n image : array\n Binary input image. This array is cast to bool before processing.\n offset_coordinates : bool, optional\n If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented\n by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds\n some \"extent\" to a pixel when computing the hull.\n tolerance : float, optional\n Tolerance when determining whether a point is inside the hull. Due\n to numerical floating point errors, a tolerance of 0 can result in\n some points erroneously being classified as being outside the hull.\n\n Returns\n -------\n hull : (M, N) array of bool\n Binary image with pixels in convex hull set to True.\n\n References\n ----------\n .. [1] https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/\n\n \"\"\"\n ndim = image.ndim\n if np.count_nonzero(image) == 0:\n warn(\"Input image is entirely zero, no valid convex hull. \"\n \"Returning empty image\", UserWarning)\n return np.zeros(image.shape, dtype=bool)\n # In 2D, we do an optimisation by choosing only pixels that are\n # the starting or ending pixel of a row or column. This vastly\n # limits the number of coordinates to examine for the virtual hull.\n if ndim == 2:\n coords = possible_hull(np.ascontiguousarray(image, dtype=np.uint8))\n else:\n coords = np.transpose(np.nonzero(image))\n if offset_coordinates:\n # when offsetting, we multiply number of vertices by 2 * ndim.\n # therefore, we reduce the number of coordinates by using a\n # convex hull on the original set, before offsetting.\n try:\n hull0 = ConvexHull(coords)\n except QhullError as err:\n warn(f\"Failed to get convex hull image. \"\n f\"Returning empty image, see error message below:\\n\"\n f\"{err}\")\n return np.zeros(image.shape, dtype=bool)\n coords = hull0.points[hull0.vertices]\n\n # Add a vertex for the middle of each pixel edge\n if offset_coordinates:\n offsets = _offsets_diamond(image.ndim)\n coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim)\n\n # repeated coordinates can *sometimes* cause problems in\n # scipy.spatial.ConvexHull, so we remove them.\n coords = unique_rows(coords)\n\n # Find the convex hull\n try:\n hull = ConvexHull(coords)\n except QhullError as err:\n warn(f\"Failed to get convex hull image. 
\"\n f\"Returning empty image, see error message below:\\n\"\n f\"{err}\")\n return np.zeros(image.shape, dtype=bool)\n vertices = hull.points[hull.vertices]\n\n # If 2D, use fast Cython function to locate convex hull pixels\n if ndim == 2:\n mask = grid_points_in_poly(image.shape, vertices)\n else:\n gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))],\n (ndim, -1))\n\n coords_in_hull = _check_coords_in_hull(gridcoords,\n hull.equations, tolerance)\n mask = np.reshape(coords_in_hull, image.shape)\n\n return mask\n\n\ndef convex_hull_object(image, *, connectivity=2):\n r\"\"\"Compute the convex hull image of individual objects in a binary image.\n\n The convex hull is the set of pixels included in the smallest convex\n polygon that surround all white pixels in the input image.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Binary input image.\n connectivity : {1, 2}, int, optional\n Determines the neighbors of each pixel. Adjacent elements\n within a squared distance of ``connectivity`` from pixel center\n are considered neighbors.::\n\n 1-connectivity 2-connectivity\n [ ] [ ] [ ] [ ]\n | \\ | /\n [ ]--[x]--[ ] [ ]--[x]--[ ]\n | / | \\\n [ ] [ ] [ ] [ ]\n\n Returns\n -------\n hull : ndarray of bool\n Binary image with pixels inside convex hull set to ``True``.\n\n Notes\n -----\n This function uses ``skimage.morphology.label`` to define unique objects,\n finds the convex hull of each using ``convex_hull_image``, and combines\n these regions with logical OR. Be aware the convex hulls of unconnected\n objects may overlap in the result. If this is suspected, consider using\n convex_hull_image separately on each object or adjust ``connectivity``.\n \"\"\"\n if image.ndim > 2:\n raise ValueError(\"Input must be a 2D image\")\n\n if connectivity not in (1, 2):\n raise ValueError('`connectivity` must be either 1 or 2.')\n\n labeled_im = label(image, connectivity=connectivity, background=0)\n convex_obj = np.zeros(image.shape, dtype=bool)\n convex_img = np.zeros(image.shape, dtype=bool)\n\n for i in range(1, labeled_im.max() + 1):\n convex_obj = convex_hull_image(labeled_im == i)\n convex_img = np.logical_or(convex_img, convex_obj)\n\n return convex_img\n", "path": "skimage/morphology/convex_hull.py"}]} | 2,855 | 482 |
gh_patches_debug_1490 | rasdani/github-patches | git_diff | coala__coala-bears-2811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HgCommitBear: Make asciinema
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/vcs/mercurial/HgCommitBear.py`
Content:
```
1 import os
2 import shutil
3
4 from bears.vcs.CommitBear import _CommitBear
5 from coala_utils.ContextManagers import change_directory
6 from coalib.misc.Shell import run_shell_command
7
8
9 class HgCommitBear(_CommitBear):
10 LANGUAGES = {'Hg'}
11 CAN_DETECT = {'Formatting'}
12
13 @classmethod
14 def check_prerequisites(cls):
15 if shutil.which('hg') is None:
16 return 'hg is not installed.'
17 else:
18 return True
19
20 def get_remotes():
21 remotes, _ = run_shell_command('hg paths')
22 return remotes
23
24 def get_head_commit(self):
25 with change_directory(self.get_config_dir() or os.getcwd()):
26 return run_shell_command('hg log -l 1 --template "{desc}"')
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bears/vcs/mercurial/HgCommitBear.py b/bears/vcs/mercurial/HgCommitBear.py
--- a/bears/vcs/mercurial/HgCommitBear.py
+++ b/bears/vcs/mercurial/HgCommitBear.py
@@ -9,6 +9,7 @@
class HgCommitBear(_CommitBear):
LANGUAGES = {'Hg'}
CAN_DETECT = {'Formatting'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'
@classmethod
def check_prerequisites(cls):
| {"golden_diff": "diff --git a/bears/vcs/mercurial/HgCommitBear.py b/bears/vcs/mercurial/HgCommitBear.py\n--- a/bears/vcs/mercurial/HgCommitBear.py\n+++ b/bears/vcs/mercurial/HgCommitBear.py\n@@ -9,6 +9,7 @@\n class HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'\n \n @classmethod\n def check_prerequisites(cls):\n", "issue": "HgCommitBear: Make asciinema\n\n", "before_files": [{"content": "import os\nimport shutil\n\nfrom bears.vcs.CommitBear import _CommitBear\nfrom coala_utils.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\n\n\nclass HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which('hg') is None:\n return 'hg is not installed.'\n else:\n return True\n\n def get_remotes():\n remotes, _ = run_shell_command('hg paths')\n return remotes\n\n def get_head_commit(self):\n with change_directory(self.get_config_dir() or os.getcwd()):\n return run_shell_command('hg log -l 1 --template \"{desc}\"')\n", "path": "bears/vcs/mercurial/HgCommitBear.py"}], "after_files": [{"content": "import os\nimport shutil\n\nfrom bears.vcs.CommitBear import _CommitBear\nfrom coala_utils.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\n\n\nclass HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which('hg') is None:\n return 'hg is not installed.'\n else:\n return True\n\n def get_remotes():\n remotes, _ = run_shell_command('hg paths')\n return remotes\n\n def get_head_commit(self):\n with change_directory(self.get_config_dir() or os.getcwd()):\n return run_shell_command('hg log -l 1 --template \"{desc}\"')\n", "path": "bears/vcs/mercurial/HgCommitBear.py"}]} | 490 | 146 |
gh_patches_debug_2203 | rasdani/github-patches | git_diff | ietf-tools__datatracker-4145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v1 api crashes on some content when serializing to xml
See the very old ticket at https://github.com/django-tastypie/django-tastypie/issues/1107.
submission.first_two_pages can contain formfeeds. These break tastypie's xml serialization. Json serialization succeeds.
The quick fix is to stop exposing first_two_pages through the API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/submit/resources.py`
Content:
```
1 # Copyright The IETF Trust 2014-2019, All Rights Reserved
2 # -*- coding: utf-8 -*-
3 # Autogenerated by the mkresources management command 2014-11-13 23:53
4
5
6 from ietf.api import ModelResource
7 from tastypie.fields import ToOneField, ToManyField
8 from tastypie.constants import ALL, ALL_WITH_RELATIONS
9 from tastypie.cache import SimpleCache
10
11 from ietf import api
12 from ietf.submit.models import ( Preapproval, SubmissionCheck, Submission,
13 SubmissionEmailEvent, SubmissionEvent, SubmissionExtResource )
14 from ietf.person.resources import PersonResource
15
16
17 class PreapprovalResource(ModelResource):
18 by = ToOneField(PersonResource, 'by')
19 class Meta:
20 cache = SimpleCache()
21 queryset = Preapproval.objects.all()
22 serializer = api.Serializer()
23 #resource_name = 'preapproval'
24 ordering = ['id', ]
25 filtering = {
26 "id": ALL,
27 "name": ALL,
28 "time": ALL,
29 "by": ALL_WITH_RELATIONS,
30 }
31 api.submit.register(PreapprovalResource())
32
33 from ietf.group.resources import GroupResource
34 from ietf.name.resources import DraftSubmissionStateNameResource
35 from ietf.doc.resources import DocumentResource
36 class SubmissionResource(ModelResource):
37 state = ToOneField(DraftSubmissionStateNameResource, 'state')
38 group = ToOneField(GroupResource, 'group', null=True)
39 draft = ToOneField(DocumentResource, 'draft', null=True)
40 checks = ToManyField('ietf.submit.resources.SubmissionCheckResource', 'checks', null=True)
41 class Meta:
42 cache = SimpleCache()
43 queryset = Submission.objects.all()
44 serializer = api.Serializer()
45 #resource_name = 'submission'
46 ordering = ['id', ]
47 filtering = {
48 "id": ALL,
49 "remote_ip": ALL,
50 "access_key": ALL,
51 "auth_key": ALL,
52 "name": ALL,
53 "title": ALL,
54 "abstract": ALL,
55 "rev": ALL,
56 "pages": ALL,
57 "authors": ALL,
58 "note": ALL,
59 "replaces": ALL,
60 "first_two_pages": ALL,
61 "file_types": ALL,
62 "file_size": ALL,
63 "document_date": ALL,
64 "submission_date": ALL,
65 "submitter": ALL,
66 "xml_version": ALL,
67 "state": ALL_WITH_RELATIONS,
68 "group": ALL_WITH_RELATIONS,
69 "draft": ALL_WITH_RELATIONS,
70 }
71 api.submit.register(SubmissionResource())
72
73 from ietf.person.resources import PersonResource
74 class SubmissionEventResource(ModelResource):
75 submission = ToOneField(SubmissionResource, 'submission')
76 by = ToOneField(PersonResource, 'by', null=True)
77 class Meta:
78 cache = SimpleCache()
79 queryset = SubmissionEvent.objects.all()
80 serializer = api.Serializer()
81 #resource_name = 'submissionevent'
82 ordering = ['id', ]
83 filtering = {
84 "id": ALL,
85 "time": ALL,
86 "desc": ALL,
87 "submission": ALL_WITH_RELATIONS,
88 "by": ALL_WITH_RELATIONS,
89 }
90 api.submit.register(SubmissionEventResource())
91
92 class SubmissionCheckResource(ModelResource):
93 submission = ToOneField(SubmissionResource, 'submission')
94 class Meta:
95 cache = SimpleCache()
96 queryset = SubmissionCheck.objects.all()
97 serializer = api.Serializer()
98 #resource_name = 'submissioncheck'
99 ordering = ['id', ]
100 filtering = {
101 "id": ALL,
102 "time": ALL,
103 "checker": ALL,
104 "passed": ALL,
105 "message": ALL,
106 "errors": ALL,
107 "warnings": ALL,
108 "items": ALL,
109 "submission": ALL_WITH_RELATIONS,
110 }
111 api.submit.register(SubmissionCheckResource())
112
113
114
115 from ietf.person.resources import PersonResource
116 from ietf.message.resources import MessageResource
117 class SubmissionEmailEventResource(ModelResource):
118 submission = ToOneField(SubmissionResource, 'submission')
119 by = ToOneField(PersonResource, 'by', null=True)
120 submissionevent_ptr = ToOneField(SubmissionEventResource, 'submissionevent_ptr')
121 message = ToOneField(MessageResource, 'message', null=True)
122 in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)
123 class Meta:
124 queryset = SubmissionEmailEvent.objects.all()
125 serializer = api.Serializer()
126 cache = SimpleCache()
127 #resource_name = 'submissionemailevent'
128 ordering = ['id', ]
129 filtering = {
130 "id": ALL,
131 "time": ALL,
132 "desc": ALL,
133 "msgtype": ALL,
134 "submission": ALL_WITH_RELATIONS,
135 "by": ALL_WITH_RELATIONS,
136 "submissionevent_ptr": ALL_WITH_RELATIONS,
137 "message": ALL_WITH_RELATIONS,
138 "in_reply_to": ALL_WITH_RELATIONS,
139 }
140 api.submit.register(SubmissionEmailEventResource())
141
142
143
144 from ietf.name.resources import ExtResourceNameResource
145 class SubmissionExtResourceResource(ModelResource):
146 name = ToOneField(ExtResourceNameResource, 'name')
147 submission = ToOneField(SubmissionResource, 'submission')
148 class Meta:
149 queryset = SubmissionExtResource.objects.all()
150 serializer = api.Serializer()
151 cache = SimpleCache()
152 resource_name = 'submissionextresource'
153 ordering = ['id', ]
154 filtering = {
155 "id": ALL,
156 "display_name": ALL,
157 "value": ALL,
158 "name": ALL_WITH_RELATIONS,
159 "submission": ALL_WITH_RELATIONS,
160 }
161 api.submit.register(SubmissionExtResourceResource())
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/submit/resources.py b/ietf/submit/resources.py
--- a/ietf/submit/resources.py
+++ b/ietf/submit/resources.py
@@ -68,6 +68,7 @@
"group": ALL_WITH_RELATIONS,
"draft": ALL_WITH_RELATIONS,
}
+ excludes = ('first_two_pages',)
api.submit.register(SubmissionResource())
from ietf.person.resources import PersonResource
| {"golden_diff": "diff --git a/ietf/submit/resources.py b/ietf/submit/resources.py\n--- a/ietf/submit/resources.py\n+++ b/ietf/submit/resources.py\n@@ -68,6 +68,7 @@\n \"group\": ALL_WITH_RELATIONS,\n \"draft\": ALL_WITH_RELATIONS,\n }\n+ excludes = ('first_two_pages',)\n api.submit.register(SubmissionResource())\n \n from ietf.person.resources import PersonResource\n", "issue": "v1 api crashes on some content when serializing to xml\nSee the very old ticket at https://github.com/django-tastypie/django-tastypie/issues/1107.\r\n\r\nsubmission.first_two_pages can contain formfeeds. These break tastypie's xml serialization. Json serialization succeeds.\r\n\r\nThe quick fix is to stop exposing first_two_pages through the API.\r\n\r\n\n", "before_files": [{"content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n# Autogenerated by the mkresources management command 2014-11-13 23:53\n\n\nfrom ietf.api import ModelResource\nfrom tastypie.fields import ToOneField, ToManyField\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie.cache import SimpleCache\n\nfrom ietf import api\nfrom ietf.submit.models import ( Preapproval, SubmissionCheck, Submission,\n SubmissionEmailEvent, SubmissionEvent, SubmissionExtResource )\nfrom ietf.person.resources import PersonResource\n\n\nclass PreapprovalResource(ModelResource):\n by = ToOneField(PersonResource, 'by')\n class Meta:\n cache = SimpleCache()\n queryset = Preapproval.objects.all()\n serializer = api.Serializer()\n #resource_name = 'preapproval'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"name\": ALL,\n \"time\": ALL,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(PreapprovalResource())\n\nfrom ietf.group.resources import GroupResource\nfrom ietf.name.resources import DraftSubmissionStateNameResource\nfrom ietf.doc.resources import DocumentResource\nclass SubmissionResource(ModelResource):\n state = ToOneField(DraftSubmissionStateNameResource, 'state')\n group = ToOneField(GroupResource, 'group', null=True)\n draft = ToOneField(DocumentResource, 'draft', null=True)\n checks = ToManyField('ietf.submit.resources.SubmissionCheckResource', 'checks', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = Submission.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submission'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"remote_ip\": ALL,\n \"access_key\": ALL,\n \"auth_key\": ALL,\n \"name\": ALL,\n \"title\": ALL,\n \"abstract\": ALL,\n \"rev\": ALL,\n \"pages\": ALL,\n \"authors\": ALL,\n \"note\": ALL,\n \"replaces\": ALL,\n \"first_two_pages\": ALL,\n \"file_types\": ALL,\n \"file_size\": ALL,\n \"document_date\": ALL,\n \"submission_date\": ALL,\n \"submitter\": ALL,\n \"xml_version\": ALL,\n \"state\": ALL_WITH_RELATIONS,\n \"group\": ALL_WITH_RELATIONS,\n \"draft\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionResource())\n\nfrom ietf.person.resources import PersonResource\nclass SubmissionEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionEvent.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissionevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEventResource())\n\nclass SubmissionCheckResource(ModelResource):\n 
submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionCheck.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissioncheck'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"checker\": ALL,\n \"passed\": ALL,\n \"message\": ALL,\n \"errors\": ALL,\n \"warnings\": ALL,\n \"items\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionCheckResource())\n\n\n\nfrom ietf.person.resources import PersonResource\nfrom ietf.message.resources import MessageResource\nclass SubmissionEmailEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n submissionevent_ptr = ToOneField(SubmissionEventResource, 'submissionevent_ptr')\n message = ToOneField(MessageResource, 'message', null=True)\n in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)\n class Meta:\n queryset = SubmissionEmailEvent.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n #resource_name = 'submissionemailevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"msgtype\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n \"submissionevent_ptr\": ALL_WITH_RELATIONS,\n \"message\": ALL_WITH_RELATIONS,\n \"in_reply_to\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEmailEventResource())\n\n\n\nfrom ietf.name.resources import ExtResourceNameResource\nclass SubmissionExtResourceResource(ModelResource):\n name = ToOneField(ExtResourceNameResource, 'name')\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n queryset = SubmissionExtResource.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n resource_name = 'submissionextresource'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"display_name\": ALL,\n \"value\": ALL,\n \"name\": ALL_WITH_RELATIONS,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionExtResourceResource())\n", "path": "ietf/submit/resources.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n# Autogenerated by the mkresources management command 2014-11-13 23:53\n\n\nfrom ietf.api import ModelResource\nfrom tastypie.fields import ToOneField, ToManyField\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie.cache import SimpleCache\n\nfrom ietf import api\nfrom ietf.submit.models import ( Preapproval, SubmissionCheck, Submission,\n SubmissionEmailEvent, SubmissionEvent, SubmissionExtResource )\nfrom ietf.person.resources import PersonResource\n\n\nclass PreapprovalResource(ModelResource):\n by = ToOneField(PersonResource, 'by')\n class Meta:\n cache = SimpleCache()\n queryset = Preapproval.objects.all()\n serializer = api.Serializer()\n #resource_name = 'preapproval'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"name\": ALL,\n \"time\": ALL,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(PreapprovalResource())\n\nfrom ietf.group.resources import GroupResource\nfrom ietf.name.resources import DraftSubmissionStateNameResource\nfrom ietf.doc.resources import DocumentResource\nclass SubmissionResource(ModelResource):\n state = ToOneField(DraftSubmissionStateNameResource, 'state')\n group = ToOneField(GroupResource, 'group', null=True)\n draft = ToOneField(DocumentResource, 'draft', null=True)\n checks = 
ToManyField('ietf.submit.resources.SubmissionCheckResource', 'checks', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = Submission.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submission'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"remote_ip\": ALL,\n \"access_key\": ALL,\n \"auth_key\": ALL,\n \"name\": ALL,\n \"title\": ALL,\n \"abstract\": ALL,\n \"rev\": ALL,\n \"pages\": ALL,\n \"authors\": ALL,\n \"note\": ALL,\n \"replaces\": ALL,\n \"first_two_pages\": ALL,\n \"file_types\": ALL,\n \"file_size\": ALL,\n \"document_date\": ALL,\n \"submission_date\": ALL,\n \"submitter\": ALL,\n \"xml_version\": ALL,\n \"state\": ALL_WITH_RELATIONS,\n \"group\": ALL_WITH_RELATIONS,\n \"draft\": ALL_WITH_RELATIONS,\n }\n excludes = ('first_two_pages',)\napi.submit.register(SubmissionResource())\n\nfrom ietf.person.resources import PersonResource\nclass SubmissionEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionEvent.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissionevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEventResource())\n\nclass SubmissionCheckResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n cache = SimpleCache()\n queryset = SubmissionCheck.objects.all()\n serializer = api.Serializer()\n #resource_name = 'submissioncheck'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"checker\": ALL,\n \"passed\": ALL,\n \"message\": ALL,\n \"errors\": ALL,\n \"warnings\": ALL,\n \"items\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionCheckResource())\n\n\n\nfrom ietf.person.resources import PersonResource\nfrom ietf.message.resources import MessageResource\nclass SubmissionEmailEventResource(ModelResource):\n submission = ToOneField(SubmissionResource, 'submission')\n by = ToOneField(PersonResource, 'by', null=True)\n submissionevent_ptr = ToOneField(SubmissionEventResource, 'submissionevent_ptr')\n message = ToOneField(MessageResource, 'message', null=True)\n in_reply_to = ToOneField(MessageResource, 'in_reply_to', null=True)\n class Meta:\n queryset = SubmissionEmailEvent.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n #resource_name = 'submissionemailevent'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"time\": ALL,\n \"desc\": ALL,\n \"msgtype\": ALL,\n \"submission\": ALL_WITH_RELATIONS,\n \"by\": ALL_WITH_RELATIONS,\n \"submissionevent_ptr\": ALL_WITH_RELATIONS,\n \"message\": ALL_WITH_RELATIONS,\n \"in_reply_to\": ALL_WITH_RELATIONS,\n }\napi.submit.register(SubmissionEmailEventResource())\n\n\n\nfrom ietf.name.resources import ExtResourceNameResource\nclass SubmissionExtResourceResource(ModelResource):\n name = ToOneField(ExtResourceNameResource, 'name')\n submission = ToOneField(SubmissionResource, 'submission')\n class Meta:\n queryset = SubmissionExtResource.objects.all()\n serializer = api.Serializer()\n cache = SimpleCache()\n resource_name = 'submissionextresource'\n ordering = ['id', ]\n filtering = { \n \"id\": ALL,\n \"display_name\": ALL,\n \"value\": ALL,\n \"name\": ALL_WITH_RELATIONS,\n \"submission\": ALL_WITH_RELATIONS,\n 
}\napi.submit.register(SubmissionExtResourceResource())\n", "path": "ietf/submit/resources.py"}]} | 1,954 | 95 |
gh_patches_debug_26289 | rasdani/github-patches | git_diff | great-expectations__great_expectations-4506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/expectations/regex_based_column_map_expectation_template.py`
Content:
```
1 """
2 This is a template for creating custom RegexBasedColumnMapExpectations.
3 For detailed instructions on how to use it, please see:
4 https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations
5 """
6
7 from typing import Dict, Optional
8
9 from great_expectations.core.expectation_configuration import ExpectationConfiguration
10 from great_expectations.exceptions.exceptions import (
11 InvalidExpectationConfigurationError,
12 )
13 from great_expectations.expectations.regex_based_column_map_expectation import (
14 RegexBasedColumnMapExpectation,
15 RegexColumnMapMetricProvider,
16 )
17
18
19 # <snippet>
20 # This class defines the Expectation itself
21 class ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):
22 """TODO: Add a docstring here"""
23
24 # These values will be used to configure the metric created by your expectation
25 regex_snake_name = "regex_name"
26 regex_camel_name = "RegexName"
27 regex = "regex pattern"
28 semantic_type_name_plural = None
29
30 # These examples will be shown in the public gallery.
31 # They will also be executed as unit tests for your Expectation.
32 examples = []
33
34 def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
35 """
36 Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
37 necessary configuration arguments have been provided for the validation of the expectation.
38
39 Args:
40 configuration (OPTIONAL[ExpectationConfiguration]): \
41 An optional Expectation Configuration entry that will be used to configure the expectation
42 Returns:
43 True if the configuration has been validated successfully. Otherwise, raises an exception
44 """
45
46 super().validate_configuration(configuration)
47 if configuration is None:
48 configuration = self.configuration
49
50 # # Check other things in configuration.kwargs and raise Exceptions if needed
51 # try:
52 # assert (
53 # ...
54 # ), "message"
55 # assert (
56 # ...
57 # ), "message"
58 # except AssertionError as e:
59 # raise InvalidExpectationConfigurationError(str(e))
60
61 return True
62
63 # Here your regex is used to create a custom metric for this expectation
64 map_metric = RegexBasedColumnMapExpectation.register_metric(
65 regex_snake_name=regex_snake_name,
66 regex_camel_name=regex_camel_name,
67 regex_=regex,
68 )
69
70 # This object contains metadata for display in the public Gallery
71 library_metadata = {
72 "tags": [], # Tags for this Expectation in the Gallery
73 "contributors": [ # Github handles for all contributors to this Expectation.
74 "@your_name_here", # Don't forget to add your github handle here!
75 ],
76 }
77
78
79 # </snippet>
80 if __name__ == "__main__":
81 ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py
--- a/examples/expectations/regex_based_column_map_expectation_template.py
+++ b/examples/expectations/regex_based_column_map_expectation_template.py
@@ -31,35 +31,6 @@
# They will also be executed as unit tests for your Expectation.
examples = []
- def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
- """
- Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
- necessary configuration arguments have been provided for the validation of the expectation.
-
- Args:
- configuration (OPTIONAL[ExpectationConfiguration]): \
- An optional Expectation Configuration entry that will be used to configure the expectation
- Returns:
- True if the configuration has been validated successfully. Otherwise, raises an exception
- """
-
- super().validate_configuration(configuration)
- if configuration is None:
- configuration = self.configuration
-
- # # Check other things in configuration.kwargs and raise Exceptions if needed
- # try:
- # assert (
- # ...
- # ), "message"
- # assert (
- # ...
- # ), "message"
- # except AssertionError as e:
- # raise InvalidExpectationConfigurationError(str(e))
-
- return True
-
# Here your regex is used to create a custom metric for this expectation
map_metric = RegexBasedColumnMapExpectation.register_metric(
regex_snake_name=regex_snake_name,
| {"golden_diff": "diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py\n--- a/examples/expectations/regex_based_column_map_expectation_template.py\n+++ b/examples/expectations/regex_based_column_map_expectation_template.py\n@@ -31,35 +31,6 @@\n # They will also be executed as unit tests for your Expectation.\n examples = []\n \n- def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n- \"\"\"\n- Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n- necessary configuration arguments have been provided for the validation of the expectation.\n-\n- Args:\n- configuration (OPTIONAL[ExpectationConfiguration]): \\\n- An optional Expectation Configuration entry that will be used to configure the expectation\n- Returns:\n- True if the configuration has been validated successfully. Otherwise, raises an exception\n- \"\"\"\n-\n- super().validate_configuration(configuration)\n- if configuration is None:\n- configuration = self.configuration\n-\n- # # Check other things in configuration.kwargs and raise Exceptions if needed\n- # try:\n- # assert (\n- # ...\n- # ), \"message\"\n- # assert (\n- # ...\n- # ), \"message\"\n- # except AssertionError as e:\n- # raise InvalidExpectationConfigurationError(str(e))\n-\n- return True\n-\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "\"\"\"\nThis is a template for creating custom RegexBasedColumnMapExpectations.\nFor detailed instructions on how to use it, please see:\n https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations\n\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.regex_based_column_map_expectation import (\n RegexBasedColumnMapExpectation,\n RegexColumnMapMetricProvider,\n)\n\n\n# <snippet>\n# This class defines the Expectation itself\nclass ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):\n \"\"\"TODO: Add a docstring here\"\"\"\n\n # These values will be used to configure the metric created by your expectation\n regex_snake_name = \"regex_name\"\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = []\n\n def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n \"\"\"\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): \\\n An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n True if the configuration has been validated successfully. 
Otherwise, raises an exception\n \"\"\"\n\n super().validate_configuration(configuration)\n if configuration is None:\n configuration = self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))\n\n return True\n\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@your_name_here\", # Don't forget to add your github handle here!\n ],\n }\n\n\n# </snippet>\nif __name__ == \"__main__\":\n ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()\n", "path": "examples/expectations/regex_based_column_map_expectation_template.py"}], "after_files": [{"content": "\"\"\"\nThis is a template for creating custom RegexBasedColumnMapExpectations.\nFor detailed instructions on how to use it, please see:\n https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations\n\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.regex_based_column_map_expectation import (\n RegexBasedColumnMapExpectation,\n RegexColumnMapMetricProvider,\n)\n\n\n# <snippet>\n# This class defines the Expectation itself\nclass ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):\n \"\"\"TODO: Add a docstring here\"\"\"\n\n # These values will be used to configure the metric created by your expectation\n regex_snake_name = \"regex_name\"\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = []\n\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@your_name_here\", # Don't forget to add your github handle here!\n ],\n }\n\n\n# </snippet>\nif __name__ == \"__main__\":\n ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()\n", "path": "examples/expectations/regex_based_column_map_expectation_template.py"}]} | 1,072 | 365 |
gh_patches_debug_2770 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7672 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pre-commit: Add Python linting tool ruff
Add https://beta.ruff.rs to our pre-commit as a replacement for `flake8`, `isort`, `pyupgrade`, etc. but written in Rust instead of Python. It can lint the CPython codebase from scratch in 0.29 seconds.
Adopting Ruff should greatly accelerate our pre-commits and our pre-commit.ci jobs. We will run the new and old tools in parallel for a sprint or two to verify the coverage before we completely drop the old tools.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/catalog/marc/marc_subject.py`
Content:
```
1 """ This entire module is deprecated,
2 openlibrary.catalog.marc.get_subjects is the preferred module
3 """
4
5 # Tell the flake8 linter to ignore this deprecated file.
6 # flake8: noqa
7
8 from collections import defaultdict
9 from deprecated import deprecated
10 from lxml import etree
11 import re
12
13
14 from openlibrary.catalog.utils.query import get_mc
15 from openlibrary.catalog.get_ia import (
16 get_from_archive,
17 marc_formats,
18 urlopen_keep_trying,
19 )
20 from openlibrary.catalog.marc import get_subjects
21 from openlibrary.catalog.marc.marc_binary import MarcBinary
22 from openlibrary.catalog.marc.marc_xml import (
23 read_marc_file,
24 MarcXml,
25 BlankTag,
26 BadSubtag,
27 )
28 from openlibrary.catalog.utils import (
29 remove_trailing_dot,
30 remove_trailing_number_dot,
31 flip_name,
32 )
33
34
35 subject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}
36
37 re_flip_name = re.compile('^(.+), ([A-Z].+)$')
38
39 # 'Rhodes, Dan (Fictitious character)'
40 re_fictitious_character = re.compile(r'^(.+), (.+)( \(.* character\))$')
41 re_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)
42 re_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')
43
44 re_place_comma = re.compile('^(.+), (.+)$')
45 re_paren = re.compile('[()]')
46
47
48 @deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')
49 def flip_place(s):
50 return get_subjects.flip_place(s)
51
52
53 @deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')
54 def flip_subject(s):
55 return get_subjects.flip_subject(s)
56
57
58 @deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')
59 def four_types(i):
60 return get_subjects.four_types(i)
61
62
63 archive_url = "http://archive.org/download/"
64
65
66 @deprecated
67 def load_binary(ia):
68 url = archive_url + ia + '/' + ia + '_meta.mrc'
69 f = urlopen_keep_trying(url)
70 data = f.content
71 assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]
72 if len(data) != int(data[:5]):
73 data = data.decode('utf-8').encode('raw_unicode_escape')
74 if len(data) != int(data[:5]):
75 return
76 return MarcBinary(data)
77
78
79 @deprecated
80 def load_xml(ia):
81 url = archive_url + ia + '/' + ia + '_marc.xml'
82 f = urlopen_keep_trying(url)
83 root = etree.fromstring(f.text).getroot()
84 if root.tag == '{http://www.loc.gov/MARC21/slim}collection':
85 root = root[0]
86 return MarcXml(root)
87
88
89 @deprecated
90 def subjects_for_work(rec):
91 field_map = {
92 'subject': 'subjects',
93 'place': 'subject_places',
94 'time': 'subject_times',
95 'person': 'subject_people',
96 }
97
98 subjects = four_types(read_subjects(rec))
99
100 return {field_map[k]: list(v) for k, v in subjects.items()}
101
102
103 re_edition_key = re.compile(r'^/(?:b|books)/(OL\d+M)$')
104
105
106 @deprecated
107 def get_subjects_from_ia(ia):
108 formats = marc_formats(ia)
109 if not any(formats.values()):
110 return {}
111 rec = None
112 if formats['bin']:
113 rec = load_binary(ia)
114 if not rec:
115 assert formats['xml']
116 rec = load_xml(ia)
117 return read_subjects(rec)
118
119
120 re_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\.xml|meta\.mrc)(:0:\d+)?$')
121
122
123 @deprecated
124 def get_work_subjects(w, do_get_mc=True):
125 found = set()
126 for e in w['editions']:
127 sr = e.get('source_records', [])
128 if sr:
129 for i in sr:
130 if i.endswith('initial import'):
131 continue
132 if i.startswith(('ia:', 'marc:')):
133 found.add(i)
134 continue
135 else:
136 mc = None
137 if do_get_mc:
138 m = re_edition_key.match(e['key'])
139 mc = get_mc('/b/' + m.group(1))
140 if mc:
141 if mc.endswith('initial import'):
142 continue
143 if not mc.startswith('amazon:') and not re_ia_marc.match(mc):
144 found.add('marc:' + mc)
145 subjects = []
146 for sr in found:
147 if sr.startswith('marc:ia:'):
148 subjects.append(get_subjects_from_ia(sr[8:]))
149 elif sr.startswith('marc:'):
150 loc = sr[5:]
151 data = get_from_archive(loc)
152 rec = MarcBinary(data)
153 subjects.append(read_subjects(rec))
154 else:
155 assert sr.startswith('ia:')
156 subjects.append(get_subjects_from_ia(sr[3:]))
157 return combine_subjects(subjects)
158
159
160 @deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')
161 def tidy_subject(s):
162 return get_subjects.tidy_subject(s)
163
164
165 re_aspects = re.compile(' [Aa]spects$')
166
167
168 @deprecated
169 def find_aspects(f):
170 cur = [(i, j) for i, j in f.get_subfields('ax')]
171 if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':
172 return
173 a, x = cur[0][1], cur[1][1]
174 x = x.strip('. ')
175 a = a.strip('. ')
176 if not re_aspects.search(x):
177 return
178 if a == 'Body, Human':
179 a = 'the Human body'
180 return x + ' of ' + flip_subject(a)
181
182
183 @deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')
184 def read_subjects(rec):
185 return get_subjects.read_subject(s)
186
187
188 @deprecated
189 def combine_subjects(subjects):
190 all_subjects = defaultdict(lambda: defaultdict(int))
191 for a in subjects:
192 for b, c in a.items():
193 for d, e in c.items():
194 all_subjects[b][d] += e
195 return {k: dict(v) for k, v in all_subjects.items()}
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/catalog/marc/marc_subject.py b/openlibrary/catalog/marc/marc_subject.py
--- a/openlibrary/catalog/marc/marc_subject.py
+++ b/openlibrary/catalog/marc/marc_subject.py
@@ -2,8 +2,8 @@
openlibrary.catalog.marc.get_subjects is the preferred module
"""
-# Tell the flake8 linter to ignore this deprecated file.
-# flake8: noqa
+# Tell the ruff linter to ignore this deprecated file.
+# ruff: noqa
from collections import defaultdict
from deprecated import deprecated
| {"golden_diff": "diff --git a/openlibrary/catalog/marc/marc_subject.py b/openlibrary/catalog/marc/marc_subject.py\n--- a/openlibrary/catalog/marc/marc_subject.py\n+++ b/openlibrary/catalog/marc/marc_subject.py\n@@ -2,8 +2,8 @@\n openlibrary.catalog.marc.get_subjects is the preferred module\n \"\"\"\n \n-# Tell the flake8 linter to ignore this deprecated file.\n-# flake8: noqa\n+# Tell the ruff linter to ignore this deprecated file.\n+# ruff: noqa\n \n from collections import defaultdict\n from deprecated import deprecated\n", "issue": "pre-commit: Add Python linting tool ruff\nAdd https://beta.ruff.rs to our pre-commit as a replacement for `flake8`, `isort`, `pyupgrade`, etc. but written in Rust instead of Python. It can lint the CPython codebase from scratch in 0.29 seconds.\r\n\r\nAdopting Ruff should greatly accelerate our pre-commits and our pre-commit.ci jobs. We will run the new and old tools in parallel for a sprint or two to verify the coverage before we completely drop the old tools.\n", "before_files": [{"content": "\"\"\" This entire module is deprecated,\n openlibrary.catalog.marc.get_subjects is the preferred module\n\"\"\"\n\n# Tell the flake8 linter to ignore this deprecated file.\n# flake8: noqa\n\nfrom collections import defaultdict\nfrom deprecated import deprecated\nfrom lxml import etree\nimport re\n\n\nfrom openlibrary.catalog.utils.query import get_mc\nfrom openlibrary.catalog.get_ia import (\n get_from_archive,\n marc_formats,\n urlopen_keep_trying,\n)\nfrom openlibrary.catalog.marc import get_subjects\nfrom openlibrary.catalog.marc.marc_binary import MarcBinary\nfrom openlibrary.catalog.marc.marc_xml import (\n read_marc_file,\n MarcXml,\n BlankTag,\n BadSubtag,\n)\nfrom openlibrary.catalog.utils import (\n remove_trailing_dot,\n remove_trailing_number_dot,\n flip_name,\n)\n\n\nsubject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}\n\nre_flip_name = re.compile('^(.+), ([A-Z].+)$')\n\n# 'Rhodes, Dan (Fictitious character)'\nre_fictitious_character = re.compile(r'^(.+), (.+)( \\(.* character\\))$')\nre_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)\nre_comma = re.compile('^([A-Z])([A-Za-z ]+?) 
*, ([A-Z][A-Z a-z]+)$')\n\nre_place_comma = re.compile('^(.+), (.+)$')\nre_paren = re.compile('[()]')\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')\ndef flip_place(s):\n return get_subjects.flip_place(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')\ndef flip_subject(s):\n return get_subjects.flip_subject(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')\ndef four_types(i):\n return get_subjects.four_types(i)\n\n\narchive_url = \"http://archive.org/download/\"\n\n\n@deprecated\ndef load_binary(ia):\n url = archive_url + ia + '/' + ia + '_meta.mrc'\n f = urlopen_keep_trying(url)\n data = f.content\n assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]\n if len(data) != int(data[:5]):\n data = data.decode('utf-8').encode('raw_unicode_escape')\n if len(data) != int(data[:5]):\n return\n return MarcBinary(data)\n\n\n@deprecated\ndef load_xml(ia):\n url = archive_url + ia + '/' + ia + '_marc.xml'\n f = urlopen_keep_trying(url)\n root = etree.fromstring(f.text).getroot()\n if root.tag == '{http://www.loc.gov/MARC21/slim}collection':\n root = root[0]\n return MarcXml(root)\n\n\n@deprecated\ndef subjects_for_work(rec):\n field_map = {\n 'subject': 'subjects',\n 'place': 'subject_places',\n 'time': 'subject_times',\n 'person': 'subject_people',\n }\n\n subjects = four_types(read_subjects(rec))\n\n return {field_map[k]: list(v) for k, v in subjects.items()}\n\n\nre_edition_key = re.compile(r'^/(?:b|books)/(OL\\d+M)$')\n\n\n@deprecated\ndef get_subjects_from_ia(ia):\n formats = marc_formats(ia)\n if not any(formats.values()):\n return {}\n rec = None\n if formats['bin']:\n rec = load_binary(ia)\n if not rec:\n assert formats['xml']\n rec = load_xml(ia)\n return read_subjects(rec)\n\n\nre_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\\.xml|meta\\.mrc)(:0:\\d+)?$')\n\n\n@deprecated\ndef get_work_subjects(w, do_get_mc=True):\n found = set()\n for e in w['editions']:\n sr = e.get('source_records', [])\n if sr:\n for i in sr:\n if i.endswith('initial import'):\n continue\n if i.startswith(('ia:', 'marc:')):\n found.add(i)\n continue\n else:\n mc = None\n if do_get_mc:\n m = re_edition_key.match(e['key'])\n mc = get_mc('/b/' + m.group(1))\n if mc:\n if mc.endswith('initial import'):\n continue\n if not mc.startswith('amazon:') and not re_ia_marc.match(mc):\n found.add('marc:' + mc)\n subjects = []\n for sr in found:\n if sr.startswith('marc:ia:'):\n subjects.append(get_subjects_from_ia(sr[8:]))\n elif sr.startswith('marc:'):\n loc = sr[5:]\n data = get_from_archive(loc)\n rec = MarcBinary(data)\n subjects.append(read_subjects(rec))\n else:\n assert sr.startswith('ia:')\n subjects.append(get_subjects_from_ia(sr[3:]))\n return combine_subjects(subjects)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')\ndef tidy_subject(s):\n return get_subjects.tidy_subject(s)\n\n\nre_aspects = re.compile(' [Aa]spects$')\n\n\n@deprecated\ndef find_aspects(f):\n cur = [(i, j) for i, j in f.get_subfields('ax')]\n if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':\n return\n a, x = cur[0][1], cur[1][1]\n x = x.strip('. ')\n a = a.strip('. 
')\n if not re_aspects.search(x):\n return\n if a == 'Body, Human':\n a = 'the Human body'\n return x + ' of ' + flip_subject(a)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')\ndef read_subjects(rec):\n return get_subjects.read_subject(s)\n\n\n@deprecated\ndef combine_subjects(subjects):\n all_subjects = defaultdict(lambda: defaultdict(int))\n for a in subjects:\n for b, c in a.items():\n for d, e in c.items():\n all_subjects[b][d] += e\n return {k: dict(v) for k, v in all_subjects.items()}\n", "path": "openlibrary/catalog/marc/marc_subject.py"}], "after_files": [{"content": "\"\"\" This entire module is deprecated,\n openlibrary.catalog.marc.get_subjects is the preferred module\n\"\"\"\n\n# Tell the ruff linter to ignore this deprecated file.\n# ruff: noqa\n\nfrom collections import defaultdict\nfrom deprecated import deprecated\nfrom lxml import etree\nimport re\n\n\nfrom openlibrary.catalog.utils.query import get_mc\nfrom openlibrary.catalog.get_ia import (\n get_from_archive,\n marc_formats,\n urlopen_keep_trying,\n)\nfrom openlibrary.catalog.marc import get_subjects\nfrom openlibrary.catalog.marc.marc_binary import MarcBinary\nfrom openlibrary.catalog.marc.marc_xml import (\n read_marc_file,\n MarcXml,\n BlankTag,\n BadSubtag,\n)\nfrom openlibrary.catalog.utils import (\n remove_trailing_dot,\n remove_trailing_number_dot,\n flip_name,\n)\n\n\nsubject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}\n\nre_flip_name = re.compile('^(.+), ([A-Z].+)$')\n\n# 'Rhodes, Dan (Fictitious character)'\nre_fictitious_character = re.compile(r'^(.+), (.+)( \\(.* character\\))$')\nre_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)\nre_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')\n\nre_place_comma = re.compile('^(.+), (.+)$')\nre_paren = re.compile('[()]')\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')\ndef flip_place(s):\n return get_subjects.flip_place(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')\ndef flip_subject(s):\n return get_subjects.flip_subject(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')\ndef four_types(i):\n return get_subjects.four_types(i)\n\n\narchive_url = \"http://archive.org/download/\"\n\n\n@deprecated\ndef load_binary(ia):\n url = archive_url + ia + '/' + ia + '_meta.mrc'\n f = urlopen_keep_trying(url)\n data = f.content\n assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]\n if len(data) != int(data[:5]):\n data = data.decode('utf-8').encode('raw_unicode_escape')\n if len(data) != int(data[:5]):\n return\n return MarcBinary(data)\n\n\n@deprecated\ndef load_xml(ia):\n url = archive_url + ia + '/' + ia + '_marc.xml'\n f = urlopen_keep_trying(url)\n root = etree.fromstring(f.text).getroot()\n if root.tag == '{http://www.loc.gov/MARC21/slim}collection':\n root = root[0]\n return MarcXml(root)\n\n\n@deprecated\ndef subjects_for_work(rec):\n field_map = {\n 'subject': 'subjects',\n 'place': 'subject_places',\n 'time': 'subject_times',\n 'person': 'subject_people',\n }\n\n subjects = four_types(read_subjects(rec))\n\n return {field_map[k]: list(v) for k, v in subjects.items()}\n\n\nre_edition_key = re.compile(r'^/(?:b|books)/(OL\\d+M)$')\n\n\n@deprecated\ndef get_subjects_from_ia(ia):\n formats = marc_formats(ia)\n if not any(formats.values()):\n return {}\n rec = None\n if formats['bin']:\n rec = load_binary(ia)\n if not rec:\n assert formats['xml']\n rec = 
load_xml(ia)\n return read_subjects(rec)\n\n\nre_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\\.xml|meta\\.mrc)(:0:\\d+)?$')\n\n\n@deprecated\ndef get_work_subjects(w, do_get_mc=True):\n found = set()\n for e in w['editions']:\n sr = e.get('source_records', [])\n if sr:\n for i in sr:\n if i.endswith('initial import'):\n continue\n if i.startswith(('ia:', 'marc:')):\n found.add(i)\n continue\n else:\n mc = None\n if do_get_mc:\n m = re_edition_key.match(e['key'])\n mc = get_mc('/b/' + m.group(1))\n if mc:\n if mc.endswith('initial import'):\n continue\n if not mc.startswith('amazon:') and not re_ia_marc.match(mc):\n found.add('marc:' + mc)\n subjects = []\n for sr in found:\n if sr.startswith('marc:ia:'):\n subjects.append(get_subjects_from_ia(sr[8:]))\n elif sr.startswith('marc:'):\n loc = sr[5:]\n data = get_from_archive(loc)\n rec = MarcBinary(data)\n subjects.append(read_subjects(rec))\n else:\n assert sr.startswith('ia:')\n subjects.append(get_subjects_from_ia(sr[3:]))\n return combine_subjects(subjects)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')\ndef tidy_subject(s):\n return get_subjects.tidy_subject(s)\n\n\nre_aspects = re.compile(' [Aa]spects$')\n\n\n@deprecated\ndef find_aspects(f):\n cur = [(i, j) for i, j in f.get_subfields('ax')]\n if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':\n return\n a, x = cur[0][1], cur[1][1]\n x = x.strip('. ')\n a = a.strip('. ')\n if not re_aspects.search(x):\n return\n if a == 'Body, Human':\n a = 'the Human body'\n return x + ' of ' + flip_subject(a)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')\ndef read_subjects(rec):\n return get_subjects.read_subject(s)\n\n\n@deprecated\ndef combine_subjects(subjects):\n all_subjects = defaultdict(lambda: defaultdict(int))\n for a in subjects:\n for b, c in a.items():\n for d, e in c.items():\n all_subjects[b][d] += e\n return {k: dict(v) for k, v in all_subjects.items()}\n", "path": "openlibrary/catalog/marc/marc_subject.py"}]} | 2,310 | 126 |
gh_patches_debug_36850 | rasdani/github-patches | git_diff | buildbot__buildbot-4934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bitbucket Cloud web hook returns 500
Started three weeks ago (missed it due to vacations), my guess is that Bitbucket changed something. Error log looks like this:
```
2019-07-02 14:33:40+0300 [_GenericHTTPChannelProtocol,13,18.234.32.226] adding changes from web hook
Traceback (most recent call last):
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 106, in getAndSubmitChanges
changes, src = yield self.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
return _cancellableInlineCallbacks(gen)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
_inlineCallbacks(None, g, status)
--- <exception caught here> ---
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 106, in getAndSubmitChanges
changes, src = yield self.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 167, in getChanges
changes, src = yield handler.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 163, in getChanges
return self.process(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 54, in process
return handler(payload)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 97, in handle_repo_push
payload['actor']['username']),
builtins.KeyError: 'username'
```
Last line reveals the culprit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/www/hooks/bitbucketcloud.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15 # Copyright Mamba Team
16
17
18 import json
19
20 from twisted.python import log
21
22 from buildbot.util import bytes2unicode
23
24 GIT_BRANCH_REF = "refs/heads/{}"
25 GIT_MERGE_REF = "refs/pull-requests/{}/merge"
26 GIT_TAG_REF = "refs/tags/{}"
27
28 _HEADER_EVENT = b'X-Event-Key'
29
30
31 class BitbucketCloudEventHandler:
32
33 def __init__(self, master, options=None):
34 if options is None:
35 options = {}
36 self.master = master
37 if not isinstance(options, dict):
38 options = {}
39 self.options = options
40 self._codebase = self.options.get('codebase', None)
41
42 def process(self, request):
43 payload = self._get_payload(request)
44 event_type = request.getHeader(_HEADER_EVENT)
45 event_type = bytes2unicode(event_type)
46 log.msg("Processing event {header}: {event}"
47 .format(header=_HEADER_EVENT, event=event_type))
48 event_type = event_type.replace(":", "_")
49 handler = getattr(self, 'handle_{}'.format(event_type), None)
50
51 if handler is None:
52 raise ValueError('Unknown event: {}'.format(event_type))
53
54 return handler(payload)
55
56 def _get_payload(self, request):
57 content = request.content.read()
58 content = bytes2unicode(content)
59 content_type = request.getHeader(b'Content-Type')
60 content_type = bytes2unicode(content_type)
61 if content_type.startswith('application/json'):
62 payload = json.loads(content)
63 else:
64 raise ValueError('Unknown content type: {}'
65 .format(content_type))
66
67 log.msg("Payload: {}".format(payload))
68
69 return payload
70
71 def handle_repo_push(self, payload):
72 changes = []
73 project = payload['repository']['project']['name']
74 repo_url = payload['repository']['links']['self']['href']
75 web_url = payload['repository']['links']['html']['href']
76
77 for payload_change in payload['push']['changes']:
78 if payload_change['new']:
79 age = 'new'
80 category = 'push'
81 else: # when new is null the ref is deleted
82 age = 'old'
83 category = 'ref-deleted'
84
85 commit_hash = payload_change[age]['target']['hash']
86
87 if payload_change[age]['type'] == 'branch':
88 branch = GIT_BRANCH_REF.format(payload_change[age]['name'])
89 elif payload_change[age]['type'] == 'tag':
90 branch = GIT_TAG_REF.format(payload_change[age]['name'])
91
92 change = {
93 'revision': commit_hash,
94 'revlink': '{}/commits/{}'.format(web_url, commit_hash),
95 'repository': repo_url,
96 'author': '{} <{}>'.format(payload['actor']['display_name'],
97 payload['actor']['username']),
98 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),
99 'branch': branch,
100 'project': project,
101 'category': category
102 }
103
104 if callable(self._codebase):
105 change['codebase'] = self._codebase(payload)
106 elif self._codebase is not None:
107 change['codebase'] = self._codebase
108
109 changes.append(change)
110
111 return (changes, payload['repository']['scm'])
112
113 def handle_pullrequest_created(self, payload):
114 return self.handle_pullrequest(
115 payload,
116 GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
117 "pull-created")
118
119 def handle_pullrequest_updated(self, payload):
120 return self.handle_pullrequest(
121 payload,
122 GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
123 "pull-updated")
124
125 def handle_pullrequest_fulfilled(self, payload):
126 return self.handle_pullrequest(
127 payload,
128 GIT_BRANCH_REF.format(
129 payload['pullrequest']['toRef']['branch']['name']),
130 "pull-fulfilled")
131
132 def handle_pullrequest_rejected(self, payload):
133 return self.handle_pullrequest(
134 payload,
135 GIT_BRANCH_REF.format(
136 payload['pullrequest']['fromRef']['branch']['name']),
137 "pull-rejected")
138
139 def handle_pullrequest(self, payload, refname, category):
140 pr_number = int(payload['pullrequest']['id'])
141 repo_url = payload['repository']['links']['self']['href']
142 change = {
143 'revision': payload['pullrequest']['fromRef']['commit']['hash'],
144 'revlink': payload['pullrequest']['link'],
145 'repository': repo_url,
146 'author': '{} <{}>'.format(payload['actor']['display_name'],
147 payload['actor']['username']),
148 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),
149 'branch': refname,
150 'project': payload['repository']['project']['name'],
151 'category': category,
152 'properties': {'pullrequesturl': payload['pullrequest']['link']}
153 }
154
155 if callable(self._codebase):
156 change['codebase'] = self._codebase(payload)
157 elif self._codebase is not None:
158 change['codebase'] = self._codebase
159
160 return [change], payload['repository']['scm']
161
162 def getChanges(self, request):
163 return self.process(request)
164
165
166 bitbucketcloud = BitbucketCloudEventHandler
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/www/hooks/bitbucketcloud.py b/master/buildbot/www/hooks/bitbucketcloud.py
--- a/master/buildbot/www/hooks/bitbucketcloud.py
+++ b/master/buildbot/www/hooks/bitbucketcloud.py
@@ -70,7 +70,7 @@
def handle_repo_push(self, payload):
changes = []
- project = payload['repository']['project']['name']
+ project = payload['repository'].get('project', {'name': 'none'})['name']
repo_url = payload['repository']['links']['self']['href']
web_url = payload['repository']['links']['html']['href']
@@ -94,7 +94,7 @@
'revlink': '{}/commits/{}'.format(web_url, commit_hash),
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['display_name'],
- payload['actor']['username']),
+ payload['actor']['nickname']),
'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),
'branch': branch,
'project': project,
@@ -139,15 +139,16 @@
def handle_pullrequest(self, payload, refname, category):
pr_number = int(payload['pullrequest']['id'])
repo_url = payload['repository']['links']['self']['href']
+ project = payload['repository'].get('project', {'name': 'none'})['name']
change = {
'revision': payload['pullrequest']['fromRef']['commit']['hash'],
'revlink': payload['pullrequest']['link'],
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['display_name'],
- payload['actor']['username']),
+ payload['actor']['nickname']),
'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),
'branch': refname,
- 'project': payload['repository']['project']['name'],
+ 'project': project,
'category': category,
'properties': {'pullrequesturl': payload['pullrequest']['link']}
}
| {"golden_diff": "diff --git a/master/buildbot/www/hooks/bitbucketcloud.py b/master/buildbot/www/hooks/bitbucketcloud.py\n--- a/master/buildbot/www/hooks/bitbucketcloud.py\n+++ b/master/buildbot/www/hooks/bitbucketcloud.py\n@@ -70,7 +70,7 @@\n \n def handle_repo_push(self, payload):\n changes = []\n- project = payload['repository']['project']['name']\n+ project = payload['repository'].get('project', {'name': 'none'})['name']\n repo_url = payload['repository']['links']['self']['href']\n web_url = payload['repository']['links']['html']['href']\n \n@@ -94,7 +94,7 @@\n 'revlink': '{}/commits/{}'.format(web_url, commit_hash),\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n- payload['actor']['username']),\n+ payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),\n 'branch': branch,\n 'project': project,\n@@ -139,15 +139,16 @@\n def handle_pullrequest(self, payload, refname, category):\n pr_number = int(payload['pullrequest']['id'])\n repo_url = payload['repository']['links']['self']['href']\n+ project = payload['repository'].get('project', {'name': 'none'})['name']\n change = {\n 'revision': payload['pullrequest']['fromRef']['commit']['hash'],\n 'revlink': payload['pullrequest']['link'],\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n- payload['actor']['username']),\n+ payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),\n 'branch': refname,\n- 'project': payload['repository']['project']['name'],\n+ 'project': project,\n 'category': category,\n 'properties': {'pullrequesturl': payload['pullrequest']['link']}\n }\n", "issue": "Bitbucket Cloud web hook returns 500\nStarted three weeks ago (missed it due to vacations), my guess is that Bitbucket changed something. 
Error log looks like this:\r\n\r\n```\r\n2019-07-02 14:33:40+0300 [_GenericHTTPChannelProtocol,13,18.234.32.226] adding changes from web hook\r\n Traceback (most recent call last):\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 106, in getAndSubmitChanges\r\n changes, src = yield self.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1613, in unwindGenerator\r\n return _cancellableInlineCallbacks(gen)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1529, in _cancellableInlineCallbacks\r\n _inlineCallbacks(None, g, status)\r\n --- <exception caught here> ---\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 106, in getAndSubmitChanges\r\n changes, src = yield self.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 167, in getChanges\r\n changes, src = yield handler.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 163, in getChanges\r\n return self.process(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 54, in process\r\n return handler(payload)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 97, in handle_repo_push\r\n payload['actor']['username']),\r\n builtins.KeyError: 'username'\r\n```\r\n\r\nLast line reveals the culprit.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Copyright Mamba Team\n\n\nimport json\n\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\n\nGIT_BRANCH_REF = \"refs/heads/{}\"\nGIT_MERGE_REF = \"refs/pull-requests/{}/merge\"\nGIT_TAG_REF = \"refs/tags/{}\"\n\n_HEADER_EVENT = b'X-Event-Key'\n\n\nclass BitbucketCloudEventHandler:\n\n def __init__(self, master, options=None):\n if options is None:\n options = {}\n self.master = master\n if not isinstance(options, dict):\n options = {}\n self.options = options\n self._codebase = self.options.get('codebase', None)\n\n def process(self, request):\n payload = self._get_payload(request)\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n log.msg(\"Processing event {header}: {event}\"\n .format(header=_HEADER_EVENT, event=event_type))\n event_type = event_type.replace(\":\", \"_\")\n handler = getattr(self, 'handle_{}'.format(event_type), None)\n\n if handler is None:\n raise ValueError('Unknown event: {}'.format(event_type))\n\n return handler(payload)\n\n def _get_payload(self, request):\n content = request.content.read()\n content = bytes2unicode(content)\n content_type = request.getHeader(b'Content-Type')\n content_type = bytes2unicode(content_type)\n if content_type.startswith('application/json'):\n payload = json.loads(content)\n else:\n raise ValueError('Unknown content type: {}'\n .format(content_type))\n\n log.msg(\"Payload: {}\".format(payload))\n\n return payload\n\n def handle_repo_push(self, payload):\n changes = []\n project = payload['repository']['project']['name']\n repo_url = payload['repository']['links']['self']['href']\n web_url = payload['repository']['links']['html']['href']\n\n for payload_change in payload['push']['changes']:\n if payload_change['new']:\n age = 'new'\n category = 'push'\n else: # when new is null the ref is deleted\n age = 'old'\n category = 'ref-deleted'\n\n commit_hash = payload_change[age]['target']['hash']\n\n if payload_change[age]['type'] == 'branch':\n branch = GIT_BRANCH_REF.format(payload_change[age]['name'])\n elif payload_change[age]['type'] == 'tag':\n branch = GIT_TAG_REF.format(payload_change[age]['name'])\n\n change = {\n 'revision': commit_hash,\n 'revlink': '{}/commits/{}'.format(web_url, commit_hash),\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['username']),\n 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),\n 'branch': branch,\n 'project': project,\n 'category': category\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n changes.append(change)\n\n return (changes, payload['repository']['scm'])\n\n def handle_pullrequest_created(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-created\")\n\n def handle_pullrequest_updated(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-updated\")\n\n def handle_pullrequest_fulfilled(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n 
payload['pullrequest']['toRef']['branch']['name']),\n \"pull-fulfilled\")\n\n def handle_pullrequest_rejected(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n payload['pullrequest']['fromRef']['branch']['name']),\n \"pull-rejected\")\n\n def handle_pullrequest(self, payload, refname, category):\n pr_number = int(payload['pullrequest']['id'])\n repo_url = payload['repository']['links']['self']['href']\n change = {\n 'revision': payload['pullrequest']['fromRef']['commit']['hash'],\n 'revlink': payload['pullrequest']['link'],\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['username']),\n 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),\n 'branch': refname,\n 'project': payload['repository']['project']['name'],\n 'category': category,\n 'properties': {'pullrequesturl': payload['pullrequest']['link']}\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n return [change], payload['repository']['scm']\n\n def getChanges(self, request):\n return self.process(request)\n\n\nbitbucketcloud = BitbucketCloudEventHandler\n", "path": "master/buildbot/www/hooks/bitbucketcloud.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Copyright Mamba Team\n\n\nimport json\n\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\n\nGIT_BRANCH_REF = \"refs/heads/{}\"\nGIT_MERGE_REF = \"refs/pull-requests/{}/merge\"\nGIT_TAG_REF = \"refs/tags/{}\"\n\n_HEADER_EVENT = b'X-Event-Key'\n\n\nclass BitbucketCloudEventHandler:\n\n def __init__(self, master, options=None):\n if options is None:\n options = {}\n self.master = master\n if not isinstance(options, dict):\n options = {}\n self.options = options\n self._codebase = self.options.get('codebase', None)\n\n def process(self, request):\n payload = self._get_payload(request)\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n log.msg(\"Processing event {header}: {event}\"\n .format(header=_HEADER_EVENT, event=event_type))\n event_type = event_type.replace(\":\", \"_\")\n handler = getattr(self, 'handle_{}'.format(event_type), None)\n\n if handler is None:\n raise ValueError('Unknown event: {}'.format(event_type))\n\n return handler(payload)\n\n def _get_payload(self, request):\n content = request.content.read()\n content = bytes2unicode(content)\n content_type = request.getHeader(b'Content-Type')\n content_type = bytes2unicode(content_type)\n if content_type.startswith('application/json'):\n payload = json.loads(content)\n else:\n raise ValueError('Unknown content type: {}'\n .format(content_type))\n\n log.msg(\"Payload: {}\".format(payload))\n\n return payload\n\n def handle_repo_push(self, payload):\n 
changes = []\n project = payload['repository'].get('project', {'name': 'none'})['name']\n repo_url = payload['repository']['links']['self']['href']\n web_url = payload['repository']['links']['html']['href']\n\n for payload_change in payload['push']['changes']:\n if payload_change['new']:\n age = 'new'\n category = 'push'\n else: # when new is null the ref is deleted\n age = 'old'\n category = 'ref-deleted'\n\n commit_hash = payload_change[age]['target']['hash']\n\n if payload_change[age]['type'] == 'branch':\n branch = GIT_BRANCH_REF.format(payload_change[age]['name'])\n elif payload_change[age]['type'] == 'tag':\n branch = GIT_TAG_REF.format(payload_change[age]['name'])\n\n change = {\n 'revision': commit_hash,\n 'revlink': '{}/commits/{}'.format(web_url, commit_hash),\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),\n 'branch': branch,\n 'project': project,\n 'category': category\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n changes.append(change)\n\n return (changes, payload['repository']['scm'])\n\n def handle_pullrequest_created(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-created\")\n\n def handle_pullrequest_updated(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-updated\")\n\n def handle_pullrequest_fulfilled(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n payload['pullrequest']['toRef']['branch']['name']),\n \"pull-fulfilled\")\n\n def handle_pullrequest_rejected(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n payload['pullrequest']['fromRef']['branch']['name']),\n \"pull-rejected\")\n\n def handle_pullrequest(self, payload, refname, category):\n pr_number = int(payload['pullrequest']['id'])\n repo_url = payload['repository']['links']['self']['href']\n project = payload['repository'].get('project', {'name': 'none'})['name']\n change = {\n 'revision': payload['pullrequest']['fromRef']['commit']['hash'],\n 'revlink': payload['pullrequest']['link'],\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),\n 'branch': refname,\n 'project': project,\n 'category': category,\n 'properties': {'pullrequesturl': payload['pullrequest']['link']}\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n return [change], payload['repository']['scm']\n\n def getChanges(self, request):\n return self.process(request)\n\n\nbitbucketcloud = BitbucketCloudEventHandler\n", "path": "master/buildbot/www/hooks/bitbucketcloud.py"}]} | 2,499 | 446 |
gh_patches_debug_21591 | rasdani/github-patches | git_diff | microsoft__torchgeo-16 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improvements to VHR-10 documentation
Need better docs describing the format of the dataset.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/datasets/nwpu.py`
Content:
```
1 import os
2 from typing import Any, Callable, Optional, Tuple
3
4 from PIL import Image
5 from torchvision.datasets import VisionDataset
6 from torchvision.datasets.utils import (
7 check_integrity,
8 download_file_from_google_drive,
9 download_url,
10 )
11
12
13 class VHR10(VisionDataset):
14 """`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.
15
16 Includes object detection bounding boxes from original paper and instance
17 segmentation masks from follow-up publications. If you use this dataset in your
18 research, please cite the following papers:
19
20 * https://doi.org/10.1016/j.isprsjprs.2014.10.002
21 * https://doi.org/10.1109/IGARSS.2019.8898573
22 * https://doi.org/10.3390/rs12060989
23 """
24
25 base_folder = "vhr10"
26 image_meta = {
27 "file_id": "1--foZ3dV5OCsqXQXT84UeKtrAqc5CkAE",
28 "filename": "NWPU VHR-10 dataset.rar",
29 "md5": "d30a7ff99d92123ebb0b3a14d9102081",
30 }
31 target_meta = {
32 "url": (
33 "https://raw.githubusercontent.com/chaozhong2010/VHR-10_dataset_coco/"
34 "master/NWPU%20VHR-10_dataset_coco/annotations.json"
35 ),
36 "filename": "annotations.json",
37 "md5": "7c76ec50c17a61bb0514050d20f22c08",
38 }
39
40 def __init__(
41 self,
42 root: str,
43 transform: Optional[Callable[[Any], Any]] = None,
44 target_transform: Optional[Callable[[Any], Any]] = None,
45 transforms: Optional[Callable[[Any], Any]] = None,
46 download: bool = False,
47 ) -> None:
48 """Initialize a new VHR-10 dataset instance.
49
50 Parameters:
51 root: root directory where dataset can be found
52 transform: a function/transform that takes in a PIL image and returns a
53 transformed version
54 target_transform: a function/transform that takes in the target and
55 transforms it
56 transforms: a function/transform that takes input sample and its target as
57 entry and returns a transformed version
58 download: if True, download dataset and store it in the root directory
59 """
60 super().__init__(root, transforms, transform, target_transform)
61
62 if download:
63 self.download()
64
65 if not self._check_integrity():
66 raise RuntimeError(
67 "Dataset not found or corrupted. "
68 + "You can use download=True to download it"
69 )
70
71 # Must be installed to parse annotations file
72 from pycocotools.coco import COCO
73
74 self.coco = COCO(
75 os.path.join(
76 self.root,
77 self.base_folder,
78 "NWPU VHR-10 dataset",
79 self.target_meta["filename"],
80 )
81 )
82 self.ids = list(sorted(self.coco.imgs.keys()))
83
84 def __getitem__(self, index: int) -> Tuple[Any, Any]:
85 """Return an index within the dataset.
86
87 Parameters:
88 idx: index to return
89
90 Returns:
91 data and label at that index
92 """
93 id = self.ids[index]
94 image = self._load_image(id)
95 annot = self._load_target(id)
96
97 target = dict(image_id=id, annotations=annot)
98
99 if self.transforms is not None:
100 image, target = self.transforms(image, target)
101
102 return image, target
103
104 def __len__(self) -> int:
105 """Return the number of data points in the dataset.
106
107 Returns:
108 length of the dataset
109 """
110 return len(self.ids)
111
112 def _load_image(self, id: int) -> Image.Image:
113 """Load a single image.
114
115 Parameters:
116 id: unique ID of the image
117
118 Returns:
119 the image
120 """
121 path = self.coco.loadImgs(id)[0]["file_name"]
122 return Image.open(
123 os.path.join(
124 self.root,
125 self.base_folder,
126 "NWPU VHR-10 dataset",
127 "positive image set",
128 path,
129 )
130 ).convert("RGB")
131
132 def _load_target(self, id: int) -> Any:
133 """Load the annotations for a single image.
134
135 Parameters:
136 id: unique ID of the image
137
138 Returns:
139 the annotations
140 """
141 return self.coco.loadAnns(self.coco.getAnnIds(id))
142
143 def _check_integrity(self) -> bool:
144 """Check integrity of dataset.
145
146 Returns:
147 True if dataset MD5s match, else False
148 """
149 image: bool = check_integrity(
150 os.path.join(self.root, self.base_folder, self.image_meta["filename"]),
151 self.image_meta["md5"],
152 )
153 target: bool = check_integrity(
154 os.path.join(
155 self.root,
156 self.base_folder,
157 "NWPU VHR-10 dataset",
158 self.target_meta["filename"],
159 ),
160 self.target_meta["md5"],
161 )
162 return image and target
163
164 def download(self) -> None:
165 """Download the dataset and extract it."""
166
167 if self._check_integrity():
168 print("Files already downloaded and verified")
169 return
170
171 download_file_from_google_drive(
172 self.image_meta["file_id"],
173 os.path.join(self.root, self.base_folder),
174 self.image_meta["filename"],
175 self.image_meta["md5"],
176 )
177
178 # Must be installed to extract RAR file
179 import rarfile
180
181 with rarfile.RarFile(
182 os.path.join(self.root, self.base_folder, self.image_meta["filename"])
183 ) as f:
184 f.extractall(os.path.join(self.root, self.base_folder))
185
186 download_url(
187 self.target_meta["url"],
188 os.path.join(self.root, self.base_folder, "NWPU VHR-10 dataset"),
189 self.target_meta["filename"],
190 self.target_meta["md5"],
191 )
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/datasets/nwpu.py b/torchgeo/datasets/nwpu.py
--- a/torchgeo/datasets/nwpu.py
+++ b/torchgeo/datasets/nwpu.py
@@ -11,7 +11,31 @@
class VHR10(VisionDataset):
- """`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.
+ """Northwestern Polytechnical University (NWPU) very-high-resolution ten-class
+ (VHR-10) remote sensing image dataset.
+
+ Consists of 800 VHR optical remote sensing images, where 715 color images were
+ acquired from Google Earth with the spatial resolution ranging from 0.5 to 2 m,
+ and 85 pansharpened color infrared (CIR) images were acquired from Vaihingen data
+ with a spatial resolution of 0.08 m.
+
+ The data set is divided into two sets:
+
+ 1. Positive image set (650 images) which contains at least one target in an image
+ 2. Negative image set (150 images) does not contain any targets
+
+ The positive image set consists of objects from ten classes:
+
+ 1. Airplanes (757)
+ 2. Ships (302)
+ 3. Storage tanks (655)
+ 4. Baseball diamonds (390)
+ 5. Tennis courts (524)
+ 6. Basketball courts (159)
+ 7. Ground track fields (163)
+ 8. Harbors (224)
+ 9. Bridges (124)
+ 10. Vehicles (477)
Includes object detection bounding boxes from original paper and instance
segmentation masks from follow-up publications. If you use this dataset in your
| {"golden_diff": "diff --git a/torchgeo/datasets/nwpu.py b/torchgeo/datasets/nwpu.py\n--- a/torchgeo/datasets/nwpu.py\n+++ b/torchgeo/datasets/nwpu.py\n@@ -11,7 +11,31 @@\n \n \n class VHR10(VisionDataset):\n- \"\"\"`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.\n+ \"\"\"Northwestern Polytechnical University (NWPU) very-high-resolution ten-class\n+ (VHR-10) remote sensing image dataset.\n+\n+ Consists of 800 VHR optical remote sensing images, where 715 color images were\n+ acquired from Google Earth with the spatial resolution ranging from 0.5 to 2 m,\n+ and 85 pansharpened color infrared (CIR) images were acquired from Vaihingen data\n+ with a spatial resolution of 0.08 m.\n+\n+ The data set is divided into two sets:\n+\n+ 1. Positive image set (650 images) which contains at least one target in an image\n+ 2. Negative image set (150 images) does not contain any targets\n+\n+ The positive image set consists of objects from ten classes:\n+\n+ 1. Airplanes (757)\n+ 2. Ships (302)\n+ 3. Storage tanks (655)\n+ 4. Baseball diamonds (390)\n+ 5. Tennis courts (524)\n+ 6. Basketball courts (159)\n+ 7. Ground track fields (163)\n+ 8. Harbors (224)\n+ 9. Bridges (124)\n+ 10. Vehicles (477)\n \n Includes object detection bounding boxes from original paper and instance\n segmentation masks from follow-up publications. If you use this dataset in your\n", "issue": "Improvements to VHR-10 documentation\nNeed better docs describing the format of the dataset.\n", "before_files": [{"content": "import os\nfrom typing import Any, Callable, Optional, Tuple\n\nfrom PIL import Image\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.datasets.utils import (\n check_integrity,\n download_file_from_google_drive,\n download_url,\n)\n\n\nclass VHR10(VisionDataset):\n \"\"\"`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.\n\n Includes object detection bounding boxes from original paper and instance\n segmentation masks from follow-up publications. 
If you use this dataset in your\n research, please cite the following papers:\n\n * https://doi.org/10.1016/j.isprsjprs.2014.10.002\n * https://doi.org/10.1109/IGARSS.2019.8898573\n * https://doi.org/10.3390/rs12060989\n \"\"\"\n\n base_folder = \"vhr10\"\n image_meta = {\n \"file_id\": \"1--foZ3dV5OCsqXQXT84UeKtrAqc5CkAE\",\n \"filename\": \"NWPU VHR-10 dataset.rar\",\n \"md5\": \"d30a7ff99d92123ebb0b3a14d9102081\",\n }\n target_meta = {\n \"url\": (\n \"https://raw.githubusercontent.com/chaozhong2010/VHR-10_dataset_coco/\"\n \"master/NWPU%20VHR-10_dataset_coco/annotations.json\"\n ),\n \"filename\": \"annotations.json\",\n \"md5\": \"7c76ec50c17a61bb0514050d20f22c08\",\n }\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable[[Any], Any]] = None,\n target_transform: Optional[Callable[[Any], Any]] = None,\n transforms: Optional[Callable[[Any], Any]] = None,\n download: bool = False,\n ) -> None:\n \"\"\"Initialize a new VHR-10 dataset instance.\n\n Parameters:\n root: root directory where dataset can be found\n transform: a function/transform that takes in a PIL image and returns a\n transformed version\n target_transform: a function/transform that takes in the target and\n transforms it\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n \"\"\"\n super().__init__(root, transforms, transform, target_transform)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n \"Dataset not found or corrupted. \"\n + \"You can use download=True to download it\"\n )\n\n # Must be installed to parse annotations file\n from pycocotools.coco import COCO\n\n self.coco = COCO(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n self.target_meta[\"filename\"],\n )\n )\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"Return an index within the dataset.\n\n Parameters:\n idx: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n id = self.ids[index]\n image = self._load_image(id)\n annot = self._load_target(id)\n\n target = dict(image_id=id, annotations=annot)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.ids)\n\n def _load_image(self, id: int) -> Image.Image:\n \"\"\"Load a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the image\n \"\"\"\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n \"positive image set\",\n path,\n )\n ).convert(\"RGB\")\n\n def _load_target(self, id: int) -> Any:\n \"\"\"Load the annotations for a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the annotations\n \"\"\"\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset.\n\n Returns:\n True if dataset MD5s match, else False\n \"\"\"\n image: bool = check_integrity(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"]),\n self.image_meta[\"md5\"],\n )\n target: bool = check_integrity(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n 
self.target_meta[\"filename\"],\n ),\n self.target_meta[\"md5\"],\n )\n return image and target\n\n def download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_file_from_google_drive(\n self.image_meta[\"file_id\"],\n os.path.join(self.root, self.base_folder),\n self.image_meta[\"filename\"],\n self.image_meta[\"md5\"],\n )\n\n # Must be installed to extract RAR file\n import rarfile\n\n with rarfile.RarFile(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"])\n ) as f:\n f.extractall(os.path.join(self.root, self.base_folder))\n\n download_url(\n self.target_meta[\"url\"],\n os.path.join(self.root, self.base_folder, \"NWPU VHR-10 dataset\"),\n self.target_meta[\"filename\"],\n self.target_meta[\"md5\"],\n )\n", "path": "torchgeo/datasets/nwpu.py"}], "after_files": [{"content": "import os\nfrom typing import Any, Callable, Optional, Tuple\n\nfrom PIL import Image\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.datasets.utils import (\n check_integrity,\n download_file_from_google_drive,\n download_url,\n)\n\n\nclass VHR10(VisionDataset):\n \"\"\"Northwestern Polytechnical University (NWPU) very-high-resolution ten-class\n (VHR-10) remote sensing image dataset.\n\n Consists of 800 VHR optical remote sensing images, where 715 color images were\n acquired from Google Earth with the spatial resolution ranging from 0.5 to 2 m,\n and 85 pansharpened color infrared (CIR) images were acquired from Vaihingen data\n with a spatial resolution of 0.08 m.\n\n The data set is divided into two sets:\n\n 1. Positive image set (650 images) which contains at least one target in an image\n 2. Negative image set (150 images) does not contain any targets\n\n The positive image set consists of objects from ten classes:\n\n 1. Airplanes (757)\n 2. Ships (302)\n 3. Storage tanks (655)\n 4. Baseball diamonds (390)\n 5. Tennis courts (524)\n 6. Basketball courts (159)\n 7. Ground track fields (163)\n 8. Harbors (224)\n 9. Bridges (124)\n 10. Vehicles (477)\n\n Includes object detection bounding boxes from original paper and instance\n segmentation masks from follow-up publications. 
If you use this dataset in your\n research, please cite the following papers:\n\n * https://doi.org/10.1016/j.isprsjprs.2014.10.002\n * https://doi.org/10.1109/IGARSS.2019.8898573\n * https://doi.org/10.3390/rs12060989\n \"\"\"\n\n base_folder = \"vhr10\"\n image_meta = {\n \"file_id\": \"1--foZ3dV5OCsqXQXT84UeKtrAqc5CkAE\",\n \"filename\": \"NWPU VHR-10 dataset.rar\",\n \"md5\": \"d30a7ff99d92123ebb0b3a14d9102081\",\n }\n target_meta = {\n \"url\": (\n \"https://raw.githubusercontent.com/chaozhong2010/VHR-10_dataset_coco/\"\n \"master/NWPU%20VHR-10_dataset_coco/annotations.json\"\n ),\n \"filename\": \"annotations.json\",\n \"md5\": \"7c76ec50c17a61bb0514050d20f22c08\",\n }\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable[[Any], Any]] = None,\n target_transform: Optional[Callable[[Any], Any]] = None,\n transforms: Optional[Callable[[Any], Any]] = None,\n download: bool = False,\n ) -> None:\n \"\"\"Initialize a new VHR-10 dataset instance.\n\n Parameters:\n root: root directory where dataset can be found\n transform: a function/transform that takes in a PIL image and returns a\n transformed version\n target_transform: a function/transform that takes in the target and\n transforms it\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n \"\"\"\n super().__init__(root, transforms, transform, target_transform)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n \"Dataset not found or corrupted. \"\n + \"You can use download=True to download it\"\n )\n\n # Must be installed to parse annotations file\n from pycocotools.coco import COCO\n\n self.coco = COCO(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n self.target_meta[\"filename\"],\n )\n )\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"Return an index within the dataset.\n\n Parameters:\n idx: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n id = self.ids[index]\n image = self._load_image(id)\n annot = self._load_target(id)\n\n target = dict(image_id=id, annotations=annot)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.ids)\n\n def _load_image(self, id: int) -> Image.Image:\n \"\"\"Load a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the image\n \"\"\"\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n \"positive image set\",\n path,\n )\n ).convert(\"RGB\")\n\n def _load_target(self, id: int) -> Any:\n \"\"\"Load the annotations for a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the annotations\n \"\"\"\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset.\n\n Returns:\n True if dataset MD5s match, else False\n \"\"\"\n image: bool = check_integrity(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"]),\n self.image_meta[\"md5\"],\n )\n target: bool = check_integrity(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n 
self.target_meta[\"filename\"],\n ),\n self.target_meta[\"md5\"],\n )\n return image and target\n\n def download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_file_from_google_drive(\n self.image_meta[\"file_id\"],\n os.path.join(self.root, self.base_folder),\n self.image_meta[\"filename\"],\n self.image_meta[\"md5\"],\n )\n\n # Must be installed to extract RAR file\n import rarfile\n\n with rarfile.RarFile(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"])\n ) as f:\n f.extractall(os.path.join(self.root, self.base_folder))\n\n download_url(\n self.target_meta[\"url\"],\n os.path.join(self.root, self.base_folder, \"NWPU VHR-10 dataset\"),\n self.target_meta[\"filename\"],\n self.target_meta[\"md5\"],\n )\n", "path": "torchgeo/datasets/nwpu.py"}]} | 2,168 | 456 |
gh_patches_debug_14225 | rasdani/github-patches | git_diff | rucio__rucio-6130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keyword `type` gets overwritten
Motivation
----------
The keyword `type` in identities.py gets overwritten; this is a bad practice.
https://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134
Modification
------------
Use another variable name.
Keyword `type` gets overwritten
Motivation
----------
The keyword `type` in identities.py gets overwritten; this is a bad practice.
https://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134
Modification
------------
Use another variable name.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/web/rest/flaskapi/v1/identities.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright European Organization for Nuclear Research (CERN) since 2012
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from flask import Flask, request, jsonify
17
18 from rucio.api.identity import add_identity, add_account_identity, list_accounts_for_identity
19 from rucio.web.rest.flaskapi.v1.common import response_headers, check_accept_header_wrapper_flask, \
20 ErrorHandlingMethodView
21 from rucio.web.rest.flaskapi.authenticated_bp import AuthenticatedBlueprint
22
23
24 class UserPass(ErrorHandlingMethodView):
25 """ Manage a username/password identity for an account. """
26
27 def put(self, account):
28 """
29 ---
30 summary: Create UserPass identity
31 description: Creates a new UserPass identity and maps it to an account.
32 tags:
33 - Identity
34 parameters:
35 - name: account
36 in: path
37 description: The account for the identity.
38 schema:
39 type: string
40 style: simple
41 - name: X-Rucio-Username
42 in: query
43 description: Username for the identity.
44 schema:
45 type: string
46 style: simple
47 required: true
48 - name: X-Rucio-Password
49 in: query
50 description: The password for the identity.
51 schema:
52 type: string
53 style: simple
54 required: true
55 - name: X-Rucio-Email
56 in: query
57 description: The email for the identity.
58 schema:
59 type: string
60 style: simple
61 required: false
62 responses:
63 201:
64 description: OK
65 content:
66 application/json:
67 schema:
68 type: string
69 enum: ['Created']
70 401:
71 description: Invalid Auth Token
72 400:
73 description: Missing username or password.
74 """
75 username = request.headers.get('X-Rucio-Username', default=None)
76 password = request.headers.get('X-Rucio-Password', default=None)
77 email = request.headers.get('X-Rucio-Email', default=None)
78
79 if not username or not password:
80 return 'Username and Password must be set.', 400
81
82 add_identity(username, 'userpass', email, password)
83
84 add_account_identity(
85 identity_key=username,
86 id_type='userpass',
87 account=account,
88 email=email,
89 password=password,
90 issuer=request.environ.get('issuer'),
91 vo=request.environ.get('vo'),
92 )
93
94 return 'Created', 201
95
96
97 class X509(ErrorHandlingMethodView):
98 """ Manage an x509 identity for an account. """
99
100 def put(self, account):
101 """
102 ---
103 summary: Create X509 identity
104 description: Creates a new X509 identity and maps it to an account.
105 tags:
106 - Identity
107 parameters:
108 - name: account
109 in: path
110 description: The account for the identity.
111 schema:
112 type: string
113 style: simple
114 - name: X-Rucio-Email
115 in: query
116 description: The email for the identity.
117 schema:
118 type: string
119 style: simple
120 required: false
121 responses:
122 201:
123 description: OK
124 content:
125 application/json:
126 schema:
127 type: string
128 enum: ['Created']
129 401:
130 description: Invalid Auth Token
131 """
132 dn = request.environ.get('SSL_CLIENT_S_DN')
133 email = request.headers.get('X-Rucio-Email', default=None)
134
135 add_identity(dn, 'x509', email=email)
136 add_account_identity(
137 identity_key=dn,
138 id_type='x509',
139 account=account,
140 email=email,
141 issuer=request.environ.get('issuer'),
142 vo=request.environ.get('vo'),
143 )
144
145 return 'Created', 201
146
147
148 class GSS(ErrorHandlingMethodView):
149 """ Manage a GSS identity for an account. """
150
151 def put(self, account):
152 """
153 ---
154 summary: Create GSS identity
155 description: Creates a new GSS identity and maps it to an account.
156 tags:
157 - Identity
158 parameters:
159 - name: account
160 in: path
161 description: The account for the identity.
162 schema:
163 type: string
164 style: simple
165 - name: X-Rucio-Email
166 in: query
167 description: The email for the identity.
168 schema:
169 type: string
170 style: simple
171 required: false
172 responses:
173 201:
174 description: OK
175 content:
176 application/json:
177 schema:
178 type: string
179 enum: ['Created']
180 401:
181 description: Invalid Auth Token
182 """
183 gsscred = request.environ.get('REMOTE_USER')
184 email = request.headers.get('X-Rucio-Email', default=None)
185
186 add_identity(gsscred, 'gss', email=email)
187 add_account_identity(
188 identity_key=gsscred,
189 id_type='gss',
190 account=account,
191 email=email,
192 issuer=request.environ.get('issuer'),
193 vo=request.environ.get('vo'),
194 )
195
196 return 'Created', 201
197
198
199 class Accounts(ErrorHandlingMethodView):
200 """ Retrieve list of accounts mapped to an identity. """
201
202 @check_accept_header_wrapper_flask(['application/json'])
203 def get(self, identity_key, type):
204 """
205 ---
206 summary: List
207 description: List all identities mapped to an account.
208 tags:
209 - Identity
210 parameters:
211 - name: identity_key
212 in: path
213 description: Identity string.
214 schema:
215 type: string
216 style: simple
217 - name: type
218 in: path
219 description: Identity type.
220 schema:
221 type: string
222 style: simple
223 required: false
224 responses:
225 200:
226 description: OK
227 content:
228 application/json:
229 schema:
230 type: array
231 items:
232 type: object
233 description: Account for the identity.
234 401:
235 description: Invalid Auth Token
236 401:
237 description: Not acceptable
238 """
239 accounts = list_accounts_for_identity(identity_key, type)
240 return jsonify(accounts)
241
242
243 def blueprint():
244 bp = AuthenticatedBlueprint('identities', __name__, url_prefix='/identities')
245
246 userpass_view = UserPass.as_view('userpass')
247 bp.add_url_rule('/<account>/userpass', view_func=userpass_view, methods=['put', ])
248 x509_view = X509.as_view('x509')
249 bp.add_url_rule('/<account>/x509', view_func=x509_view, methods=['put', ])
250 gss_view = GSS.as_view('gss')
251 bp.add_url_rule('/<account>/gss', view_func=gss_view, methods=['put', ])
252 accounts_view = Accounts.as_view('accounts')
253 bp.add_url_rule('/<identity_key>/<type>/accounts', view_func=accounts_view, methods=['get', ])
254
255 bp.after_request(response_headers)
256 return bp
257
258
259 def make_doc():
260 """ Only used for sphinx documentation """
261 doc_app = Flask(__name__)
262 doc_app.register_blueprint(blueprint())
263 return doc_app
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/web/rest/flaskapi/v1/identities.py b/lib/rucio/web/rest/flaskapi/v1/identities.py
--- a/lib/rucio/web/rest/flaskapi/v1/identities.py
+++ b/lib/rucio/web/rest/flaskapi/v1/identities.py
@@ -200,7 +200,7 @@
""" Retrieve list of accounts mapped to an identity. """
@check_accept_header_wrapper_flask(['application/json'])
- def get(self, identity_key, type):
+ def get(self, identity_key, type_):
"""
---
summary: List
@@ -236,7 +236,7 @@
401:
description: Not acceptable
"""
- accounts = list_accounts_for_identity(identity_key, type)
+ accounts = list_accounts_for_identity(identity_key, type_)
return jsonify(accounts)
| {"golden_diff": "diff --git a/lib/rucio/web/rest/flaskapi/v1/identities.py b/lib/rucio/web/rest/flaskapi/v1/identities.py\n--- a/lib/rucio/web/rest/flaskapi/v1/identities.py\n+++ b/lib/rucio/web/rest/flaskapi/v1/identities.py\n@@ -200,7 +200,7 @@\n \"\"\" Retrieve list of accounts mapped to an identity. \"\"\"\n \n @check_accept_header_wrapper_flask(['application/json'])\n- def get(self, identity_key, type):\n+ def get(self, identity_key, type_):\n \"\"\"\n ---\n summary: List\n@@ -236,7 +236,7 @@\n 401:\n description: Not acceptable\n \"\"\"\n- accounts = list_accounts_for_identity(identity_key, type)\n+ accounts = list_accounts_for_identity(identity_key, type_)\n return jsonify(accounts)\n", "issue": "Keyword `type` gets overwritten\nMotivation\r\n----------\r\nThe keyword `type` in identities.py gets overwritten, this is a bad practice.\r\nhttps://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134\r\n\r\nModification\r\n------------\r\nUse another variable name.\nKeyword `type` gets overwritten\nMotivation\r\n----------\r\nThe keyword `type` in identities.py gets overwritten, this is a bad practice.\r\nhttps://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134\r\n\r\nModification\r\n------------\r\nUse another variable name.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import Flask, request, jsonify\n\nfrom rucio.api.identity import add_identity, add_account_identity, list_accounts_for_identity\nfrom rucio.web.rest.flaskapi.v1.common import response_headers, check_accept_header_wrapper_flask, \\\n ErrorHandlingMethodView\nfrom rucio.web.rest.flaskapi.authenticated_bp import AuthenticatedBlueprint\n\n\nclass UserPass(ErrorHandlingMethodView):\n \"\"\" Manage a username/password identity for an account. 
\"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create UserPass identity\n description: Creates a new UserPass identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Username\n in: query\n description: Username for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Password\n in: query\n description: The password for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n 400:\n description: Missing username or password.\n \"\"\"\n username = request.headers.get('X-Rucio-Username', default=None)\n password = request.headers.get('X-Rucio-Password', default=None)\n email = request.headers.get('X-Rucio-Email', default=None)\n\n if not username or not password:\n return 'Username and Password must be set.', 400\n\n add_identity(username, 'userpass', email, password)\n\n add_account_identity(\n identity_key=username,\n id_type='userpass',\n account=account,\n email=email,\n password=password,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass X509(ErrorHandlingMethodView):\n \"\"\" Manage an x509 identity for an account. \"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create X509 identity\n description: Creates a new X509 identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n dn = request.environ.get('SSL_CLIENT_S_DN')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(dn, 'x509', email=email)\n add_account_identity(\n identity_key=dn,\n id_type='x509',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass GSS(ErrorHandlingMethodView):\n \"\"\" Manage a GSS identity for an account. 
\"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create GSS identity\n description: Creates a new GSS identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n gsscred = request.environ.get('REMOTE_USER')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(gsscred, 'gss', email=email)\n add_account_identity(\n identity_key=gsscred,\n id_type='gss',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass Accounts(ErrorHandlingMethodView):\n \"\"\" Retrieve list of accounts mapped to an identity. \"\"\"\n\n @check_accept_header_wrapper_flask(['application/json'])\n def get(self, identity_key, type):\n \"\"\"\n ---\n summary: List\n description: List all identities mapped to an account.\n tags:\n - Identity\n parameters:\n - name: identity_key\n in: path\n description: Identity string.\n schema:\n type: string\n style: simple\n - name: type\n in: path\n description: Identity type.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 200:\n description: OK\n content:\n application/json:\n schema:\n type: array\n items:\n type: object\n description: Account for the identity.\n 401:\n description: Invalid Auth Token\n 401:\n description: Not acceptable\n \"\"\"\n accounts = list_accounts_for_identity(identity_key, type)\n return jsonify(accounts)\n\n\ndef blueprint():\n bp = AuthenticatedBlueprint('identities', __name__, url_prefix='/identities')\n\n userpass_view = UserPass.as_view('userpass')\n bp.add_url_rule('/<account>/userpass', view_func=userpass_view, methods=['put', ])\n x509_view = X509.as_view('x509')\n bp.add_url_rule('/<account>/x509', view_func=x509_view, methods=['put', ])\n gss_view = GSS.as_view('gss')\n bp.add_url_rule('/<account>/gss', view_func=gss_view, methods=['put', ])\n accounts_view = Accounts.as_view('accounts')\n bp.add_url_rule('/<identity_key>/<type>/accounts', view_func=accounts_view, methods=['get', ])\n\n bp.after_request(response_headers)\n return bp\n\n\ndef make_doc():\n \"\"\" Only used for sphinx documentation \"\"\"\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint())\n return doc_app\n", "path": "lib/rucio/web/rest/flaskapi/v1/identities.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import Flask, request, jsonify\n\nfrom rucio.api.identity import add_identity, add_account_identity, list_accounts_for_identity\nfrom 
rucio.web.rest.flaskapi.v1.common import response_headers, check_accept_header_wrapper_flask, \\\n ErrorHandlingMethodView\nfrom rucio.web.rest.flaskapi.authenticated_bp import AuthenticatedBlueprint\n\n\nclass UserPass(ErrorHandlingMethodView):\n \"\"\" Manage a username/password identity for an account. \"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create UserPass identity\n description: Creates a new UserPass identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Username\n in: query\n description: Username for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Password\n in: query\n description: The password for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n 400:\n description: Missing username or password.\n \"\"\"\n username = request.headers.get('X-Rucio-Username', default=None)\n password = request.headers.get('X-Rucio-Password', default=None)\n email = request.headers.get('X-Rucio-Email', default=None)\n\n if not username or not password:\n return 'Username and Password must be set.', 400\n\n add_identity(username, 'userpass', email, password)\n\n add_account_identity(\n identity_key=username,\n id_type='userpass',\n account=account,\n email=email,\n password=password,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass X509(ErrorHandlingMethodView):\n \"\"\" Manage an x509 identity for an account. \"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create X509 identity\n description: Creates a new X509 identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n dn = request.environ.get('SSL_CLIENT_S_DN')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(dn, 'x509', email=email)\n add_account_identity(\n identity_key=dn,\n id_type='x509',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass GSS(ErrorHandlingMethodView):\n \"\"\" Manage a GSS identity for an account. 
\"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create GSS identity\n description: Creates a new GSS identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n gsscred = request.environ.get('REMOTE_USER')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(gsscred, 'gss', email=email)\n add_account_identity(\n identity_key=gsscred,\n id_type='gss',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass Accounts(ErrorHandlingMethodView):\n \"\"\" Retrieve list of accounts mapped to an identity. \"\"\"\n\n @check_accept_header_wrapper_flask(['application/json'])\n def get(self, identity_key, type_):\n \"\"\"\n ---\n summary: List\n description: List all identities mapped to an account.\n tags:\n - Identity\n parameters:\n - name: identity_key\n in: path\n description: Identity string.\n schema:\n type: string\n style: simple\n - name: type\n in: path\n description: Identity type.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 200:\n description: OK\n content:\n application/json:\n schema:\n type: array\n items:\n type: object\n description: Account for the identity.\n 401:\n description: Invalid Auth Token\n 401:\n description: Not acceptable\n \"\"\"\n accounts = list_accounts_for_identity(identity_key, type_)\n return jsonify(accounts)\n\n\ndef blueprint():\n bp = AuthenticatedBlueprint('identities', __name__, url_prefix='/identities')\n\n userpass_view = UserPass.as_view('userpass')\n bp.add_url_rule('/<account>/userpass', view_func=userpass_view, methods=['put', ])\n x509_view = X509.as_view('x509')\n bp.add_url_rule('/<account>/x509', view_func=x509_view, methods=['put', ])\n gss_view = GSS.as_view('gss')\n bp.add_url_rule('/<account>/gss', view_func=gss_view, methods=['put', ])\n accounts_view = Accounts.as_view('accounts')\n bp.add_url_rule('/<identity_key>/<type>/accounts', view_func=accounts_view, methods=['get', ])\n\n bp.after_request(response_headers)\n return bp\n\n\ndef make_doc():\n \"\"\" Only used for sphinx documentation \"\"\"\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint())\n return doc_app\n", "path": "lib/rucio/web/rest/flaskapi/v1/identities.py"}]} | 2,889 | 202 |
gh_patches_debug_51093 | rasdani/github-patches | git_diff | ivy-llc__ivy-16195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
argmax
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/tensor.py`
Content:
```
1 # local
2 import ivy
3 import ivy.functional.frontends.paddle as paddle_frontend
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 _to_ivy_array,
6 )
7 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
8
9
10 class Tensor:
11 def __init__(self, array, dtype=None, place="cpu", stop_gradient=True):
12 self._ivy_array = (
13 ivy.array(array, dtype=dtype, device=place)
14 if not isinstance(array, ivy.Array)
15 else array
16 )
17 self._dtype = dtype
18 self._place = place
19 self._stop_gradient = stop_gradient
20
21 def __repr__(self):
22 return (
23 str(self._ivy_array.__repr__())
24 .replace("ivy.array", "ivy.frontends.paddle.Tensor")
25 .replace("dev", "place")
26 )
27
28 # Properties #
29 # ---------- #
30
31 @property
32 def ivy_array(self):
33 return self._ivy_array
34
35 @property
36 def place(self):
37 return self.ivy_array.device
38
39 @property
40 def dtype(self):
41 return self._ivy_array.dtype
42
43 @property
44 def shape(self):
45 return self._ivy_array.shape
46
47 @property
48 def ndim(self):
49 return self.dim()
50
51 # Setters #
52 # --------#
53
54 @ivy_array.setter
55 def ivy_array(self, array):
56 self._ivy_array = (
57 ivy.array(array) if not isinstance(array, ivy.Array) else array
58 )
59
60 # Special Methods #
61 # -------------------#
62
63 def __getitem__(self, item):
64 ivy_args = ivy.nested_map([self, item], _to_ivy_array)
65 ret = ivy.get_item(*ivy_args)
66 return paddle_frontend.Tensor(ret)
67
68 def __setitem__(self, item, value):
69 item, value = ivy.nested_map([item, value], _to_ivy_array)
70 self.ivy_array[item] = value
71
72 def __iter__(self):
73 if self.ndim == 0:
74 raise TypeError("iteration over a 0-d tensor not supported")
75 for i in range(self.shape[0]):
76 yield self[i]
77
78 # Instance Methods #
79 # ---------------- #
80
81 def reshape(self, *args, shape=None):
82 if args and shape:
83 raise TypeError("reshape() got multiple values for argument 'shape'")
84 if shape is not None:
85 return paddle_frontend.reshape(self._ivy_array, shape)
86 if args:
87 if isinstance(args[0], (tuple, list)):
88 shape = args[0]
89 return paddle_frontend.reshape(self._ivy_array, shape)
90 else:
91 return paddle_frontend.reshape(self._ivy_array, args)
92 return paddle_frontend.reshape(self._ivy_array)
93
94 def dim(self):
95 return self.ivy_array.ndim
96
97 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
98 def abs(self):
99 return paddle_frontend.abs(self)
100
101 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
102 def asinh(self, name=None):
103 return ivy.asinh(self._ivy_array)
104
105 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
106 def asin(self, name=None):
107 return ivy.asin(self._ivy_array)
108
109 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
110 def log(self, name=None):
111 return ivy.log(self._ivy_array)
112
113 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
114 def sin(self, name=None):
115 return ivy.sin(self._ivy_array)
116
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -114,3 +114,6 @@
def sin(self, name=None):
return ivy.sin(self._ivy_array)
+ @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+ def argmax(self, axis=None, keepdim=False, dtype=None, name=None):
+ return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py\n--- a/ivy/functional/frontends/paddle/tensor/tensor.py\n+++ b/ivy/functional/frontends/paddle/tensor/tensor.py\n@@ -114,3 +114,6 @@\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n \r\n+ @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n+ def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n+ return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n", "issue": "argmax\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return 
ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}], "after_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n 
@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}]} | 1,384 | 175 |
gh_patches_debug_13991 | rasdani/github-patches | git_diff | pyodide__pyodide-2099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.19 release
Opening an issue to track the 0.19 release.
I think we are fairly ready to make the 0.19.0 release. Ideally, the following would be nice to do before the 0.19 alpha release (but not critical),
- update browser versions used for testing https://github.com/pyodide/pyodide/pull/1952
- which should hopefully unblock emscripten update https://github.com/pyodide/pyodide/pull/2035
- run the update `meta.yaml` script for all pure python packages
For instance, maybe we could plan to have a
- A release candidate 0.19.0rc0: 2021/12/22 or 23 -> to make it easier still use the main branch up to the final release
- A final release 0.19.0: 2021/12/30
?
cc @hoodmane
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/py/pyodide/__init__.py`
Content:
```
1 # When the pyodide package is imported, both the js and the pyodide_js modules
2 # will be available to import from. Not all functions in pyodide_js will work
3 # until after pyodide is first imported, imported functions from pyodide_js
4 # should not be used at import time. It is fine to use js functions at import
5 # time.
6 #
7 # All pure Python code that does not require js or pyodide_js should go in
8 # the _pyodide package.
9 #
10 # This package is imported by the test suite as well, and currently we don't use
11 # pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before
12 # importing from these.
13
14 from ._core import (
15 JsProxy,
16 JsException,
17 create_once_callable,
18 create_proxy,
19 to_js,
20 IN_BROWSER,
21 ConversionError,
22 destroy_proxies,
23 )
24 from _pyodide._base import (
25 eval_code,
26 eval_code_async,
27 find_imports,
28 CodeRunner,
29 should_quiet,
30 )
31 from .http import open_url
32 from . import _state # noqa
33
34 from _pyodide._importhook import register_js_module, unregister_js_module
35
36 if IN_BROWSER:
37 import asyncio
38 from .webloop import WebLoopPolicy
39
40 asyncio.set_event_loop_policy(WebLoopPolicy())
41
42
43 __version__ = "0.19.0dev0"
44
45 __all__ = [
46 "open_url",
47 "eval_code",
48 "eval_code_async",
49 "CodeRunner",
50 "find_imports",
51 "JsProxy",
52 "JsException",
53 "to_js",
54 "register_js_module",
55 "unregister_js_module",
56 "create_once_callable",
57 "create_proxy",
58 "console",
59 "should_quiet",
60 "ConversionError",
61 "destroy_proxies",
62 ]
63
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Configuration file for the Sphinx documentation builder.
3
4 # -- Path setup --------------------------------------------------------------
5
6 import os
7 import sys
8 from typing import Dict, Any
9 import pathlib
10 import subprocess
11
12 base_dir = pathlib.Path(__file__).resolve().parent.parent
13 path_dirs = [
14 str(base_dir),
15 str(base_dir / "pyodide-build"),
16 str(base_dir / "docs/sphinx_pyodide"),
17 str(base_dir / "src/py"),
18 str(base_dir / "packages/micropip/src"),
19 ]
20 sys.path = path_dirs + sys.path
21
22 # -- Project information -----------------------------------------------------
23
24 project = "Pyodide"
25 copyright = "2019-2021, Pyodide contributors and Mozilla"
26
27 import pyodide
28 import micropip # noqa
29
30 # We hacked it so that autodoc will look for submodules, but only if we import
31 # them here. TODO: look these up in the source directory?
32 import pyodide.console
33 import pyodide.http
34 import pyodide.webloop
35
36 # The full version, including alpha/beta/rc tags.
37 release = version = pyodide.__version__
38
39
40 # -- General configuration ---------------------------------------------------
41
42 # If your documentation needs a minimal Sphinx version, state it here.
43 #
44 # needs_sphinx = '1.0'
45
46 extensions = [
47 "sphinx.ext.autodoc",
48 "sphinx.ext.autosummary",
49 "sphinxcontrib.napoleon",
50 "myst_parser",
51 "sphinx_js",
52 "autodocsumm",
53 "sphinx_panels",
54 "sphinx_pyodide",
55 "sphinx_argparse_cli",
56 # "versionwarning.extension",
57 "sphinx_issues",
58 ]
59
60 myst_enable_extensions = ["substitution"]
61 js_source_path = ["../src/js", "../src/core"]
62 jsdoc_config_path = "./jsdoc_conf.json"
63 root_for_relative_js_paths = "../src/"
64 issues_github_path = "pyodide/pyodide"
65
66 versionwarning_messages = {
67 "latest": (
68 "This is the development version of the documentation. ",
69 'See <a href="https://pyodide.org/">here</a> for latest stable '
70 "documentation. Please do not use Pyodide with non "
71 "versioned (`dev`) URLs from the CDN for deployed applications!",
72 )
73 }
74
75 autosummary_generate = True
76 autodoc_default_flags = ["members", "inherited-members"]
77
78 # Add any paths that contain templates here, relative to this directory.
79 templates_path = ["_templates"]
80
81 # The suffix(es) of source filenames.
82 source_suffix = [".rst", ".md"]
83
84 # The master toctree document.
85 master_doc = "index"
86
87 # The language for content autogenerated by Sphinx.
88 language = None
89
90 # List of patterns, relative to source directory, that match files and
91 # directories to ignore when looking for source files.
92 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"]
93
94 # The name of the Pygments (syntax highlighting) style to use.
95 pygments_style = None
96
97 # -- Options for HTML output -------------------------------------------------
98
99 # The theme to use for HTML and HTML Help pages. See the documentation for
100 # a list of builtin themes.
101 #
102 html_theme = "sphinx_book_theme"
103 html_logo = "_static/img/pyodide-logo.png"
104 html_title = f"Version {version}"
105
106 # theme-specific options
107 html_theme_options: Dict[str, Any] = {}
108
109 # paths that contain custom static files (such as style sheets)
110 html_static_path = ["_static"]
111
112
113 html_css_files = [
114 "css/pyodide.css",
115 ]
116
117 # Custom sidebar templates, must be a dictionary that maps document names
118 # to template names.
119 # html_sidebars = {}
120
121 # -- Options for HTMLHelp output ---------------------------------------------
122
123 # Output file base name for HTML help builder.
124 htmlhelp_basename = "Pyodidedoc"
125
126 # A list of files that should not be packed into the epub file.
127 epub_exclude_files = ["search.html"]
128
129 if "READTHEDOCS" in os.environ:
130 env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/dev/full/"}
131 os.makedirs("_build/html", exist_ok=True)
132 res = subprocess.check_output(
133 ["make", "-C", "..", "docs/_build/html/console.html"],
134 env=env,
135 stderr=subprocess.STDOUT,
136 encoding="utf-8",
137 )
138 print(res)
139
140
141 # Prevent API docs for webloop methods: they are the same as for base event loop
142 # and it clutters api docs too much
143
144
145 def delete_attrs(cls):
146 for name in dir(cls):
147 if not name.startswith("_"):
148 try:
149 delattr(cls, name)
150 except:
151 pass
152
153
154 delete_attrs(pyodide.webloop.WebLoop)
155 delete_attrs(pyodide.webloop.WebLoopPolicy)
156 delete_attrs(pyodide.console.PyodideConsole)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -127,7 +127,7 @@
epub_exclude_files = ["search.html"]
if "READTHEDOCS" in os.environ:
- env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/dev/full/"}
+ env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/v0.19.0/full/"}
os.makedirs("_build/html", exist_ok=True)
res = subprocess.check_output(
["make", "-C", "..", "docs/_build/html/console.html"],
diff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py
--- a/src/py/pyodide/__init__.py
+++ b/src/py/pyodide/__init__.py
@@ -40,7 +40,7 @@
asyncio.set_event_loop_policy(WebLoopPolicy())
-__version__ = "0.19.0dev0"
+__version__ = "0.19.0"
__all__ = [
"open_url",
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -127,7 +127,7 @@\n epub_exclude_files = [\"search.html\"]\n \n if \"READTHEDOCS\" in os.environ:\n- env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/dev/full/\"}\n+ env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/v0.19.0/full/\"}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\ndiff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py\n--- a/src/py/pyodide/__init__.py\n+++ b/src/py/pyodide/__init__.py\n@@ -40,7 +40,7 @@\n asyncio.set_event_loop_policy(WebLoopPolicy())\n \n \n-__version__ = \"0.19.0dev0\"\n+__version__ = \"0.19.0\"\n \n __all__ = [\n \"open_url\",\n", "issue": "0.19 release\nOpening an issue to track the 0.19 release.\r\n\r\nI think we are fairly ready to make the 0.19.0 release. Ideally, the following would be nice to do before the 0.19 alpha release (but not critical),\r\n - update browser versions used for testing https://github.com/pyodide/pyodide/pull/1952 \r\n - which should hopefully unblock emscripten update https://github.com/pyodide/pyodide/pull/2035\r\n - run the update `meta.yaml` script for all pure python packages \r\n\r\nFor instance, maybe we could plan to have a\r\n - A release candidate 0.19.0rc0: 2021/12/22 or 23 -> to make it easier still use the main branch up to the final release\r\n - A final release 0.19.0: 2021/12/30\r\n \r\n?\r\n\r\ncc @hoodmane \n", "before_files": [{"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n\nfrom ._core import (\n JsProxy,\n JsException,\n create_once_callable,\n create_proxy,\n to_js,\n IN_BROWSER,\n ConversionError,\n destroy_proxies,\n)\nfrom _pyodide._base import (\n eval_code,\n eval_code_async,\n find_imports,\n CodeRunner,\n should_quiet,\n)\nfrom .http import open_url\nfrom . 
import _state # noqa\n\nfrom _pyodide._importhook import register_js_module, unregister_js_module\n\nif IN_BROWSER:\n import asyncio\n from .webloop import WebLoopPolicy\n\n asyncio.set_event_loop_policy(WebLoopPolicy())\n\n\n__version__ = \"0.19.0dev0\"\n\n__all__ = [\n \"open_url\",\n \"eval_code\",\n \"eval_code_async\",\n \"CodeRunner\",\n \"find_imports\",\n \"JsProxy\",\n \"JsException\",\n \"to_js\",\n \"register_js_module\",\n \"unregister_js_module\",\n \"create_once_callable\",\n \"create_proxy\",\n \"console\",\n \"should_quiet\",\n \"ConversionError\",\n \"destroy_proxies\",\n]\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\nfrom typing import Dict, Any\nimport pathlib\nimport subprocess\n\nbase_dir = pathlib.Path(__file__).resolve().parent.parent\npath_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n]\nsys.path = path_dirs + sys.path\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2021, Pyodide contributors and Mozilla\"\n\nimport pyodide\nimport micropip # noqa\n\n# We hacked it so that autodoc will look for submodules, but only if we import\n# them here. TODO: look these up in the source directory?\nimport pyodide.console\nimport pyodide.http\nimport pyodide.webloop\n\n# The full version, including alpha/beta/rc tags.\nrelease = version = pyodide.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n # \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\njs_source_path = [\"../src/js\", \"../src/core\"]\njsdoc_config_path = \"./jsdoc_conf.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \",\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. 
Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\",\n )\n}\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\nhtml_title = f\"Version {version}\"\n\n# theme-specific options\nhtml_theme_options: Dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\nif \"READTHEDOCS\" in os.environ:\n env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/dev/full/\"}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\n\n# Prevent API docs for webloop methods: they are the same as for base event loop\n# and it clutters api docs too much\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except:\n pass\n\n\ndelete_attrs(pyodide.webloop.WebLoop)\ndelete_attrs(pyodide.webloop.WebLoopPolicy)\ndelete_attrs(pyodide.console.PyodideConsole)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n\nfrom ._core import (\n JsProxy,\n JsException,\n create_once_callable,\n create_proxy,\n to_js,\n IN_BROWSER,\n ConversionError,\n destroy_proxies,\n)\nfrom _pyodide._base import (\n eval_code,\n eval_code_async,\n find_imports,\n CodeRunner,\n should_quiet,\n)\nfrom .http import open_url\nfrom . 
import _state # noqa\n\nfrom _pyodide._importhook import register_js_module, unregister_js_module\n\nif IN_BROWSER:\n import asyncio\n from .webloop import WebLoopPolicy\n\n asyncio.set_event_loop_policy(WebLoopPolicy())\n\n\n__version__ = \"0.19.0\"\n\n__all__ = [\n \"open_url\",\n \"eval_code\",\n \"eval_code_async\",\n \"CodeRunner\",\n \"find_imports\",\n \"JsProxy\",\n \"JsException\",\n \"to_js\",\n \"register_js_module\",\n \"unregister_js_module\",\n \"create_once_callable\",\n \"create_proxy\",\n \"console\",\n \"should_quiet\",\n \"ConversionError\",\n \"destroy_proxies\",\n]\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\nfrom typing import Dict, Any\nimport pathlib\nimport subprocess\n\nbase_dir = pathlib.Path(__file__).resolve().parent.parent\npath_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n]\nsys.path = path_dirs + sys.path\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2021, Pyodide contributors and Mozilla\"\n\nimport pyodide\nimport micropip # noqa\n\n# We hacked it so that autodoc will look for submodules, but only if we import\n# them here. TODO: look these up in the source directory?\nimport pyodide.console\nimport pyodide.http\nimport pyodide.webloop\n\n# The full version, including alpha/beta/rc tags.\nrelease = version = pyodide.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n # \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\njs_source_path = [\"../src/js\", \"../src/core\"]\njsdoc_config_path = \"./jsdoc_conf.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \",\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\",\n )\n}\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\nhtml_title = f\"Version {version}\"\n\n# theme-specific options\nhtml_theme_options: Dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\nif \"READTHEDOCS\" in os.environ:\n env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/v0.19.0/full/\"}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\n\n# Prevent API docs for webloop methods: they are the same as for base event loop\n# and it clutters api docs too much\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except:\n pass\n\n\ndelete_attrs(pyodide.webloop.WebLoop)\ndelete_attrs(pyodide.webloop.WebLoopPolicy)\ndelete_attrs(pyodide.console.PyodideConsole)\n", "path": "docs/conf.py"}]} | 2,447 | 258 |
gh_patches_debug_20786 | rasdani/github-patches | git_diff | python-discord__bot-941 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Publish messages to the #reddit channel
Since the #reddit channel has moved to the new News category we should convert it to an announcement channel and use the `.publish()` method on messages sent there so users can subscribe to daily reddit updates.
[Message.publish() in the discord.py docs](https://discordpy.readthedocs.io/en/latest/api.html#discord.Message.publish)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/reddit.py`
Content:
```
1 import asyncio
2 import logging
3 import random
4 import textwrap
5 from collections import namedtuple
6 from datetime import datetime, timedelta
7 from typing import List
8
9 from aiohttp import BasicAuth, ClientError
10 from discord import Colour, Embed, TextChannel
11 from discord.ext.commands import Cog, Context, group
12 from discord.ext.tasks import loop
13
14 from bot.bot import Bot
15 from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks
16 from bot.converters import Subreddit
17 from bot.decorators import with_role
18 from bot.pagination import LinePaginator
19
20 log = logging.getLogger(__name__)
21
22 AccessToken = namedtuple("AccessToken", ["token", "expires_at"])
23
24
25 class Reddit(Cog):
26 """Track subreddit posts and show detailed statistics about them."""
27
28 HEADERS = {"User-Agent": "python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)"}
29 URL = "https://www.reddit.com"
30 OAUTH_URL = "https://oauth.reddit.com"
31 MAX_RETRIES = 3
32
33 def __init__(self, bot: Bot):
34 self.bot = bot
35
36 self.webhook = None
37 self.access_token = None
38 self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)
39
40 bot.loop.create_task(self.init_reddit_ready())
41 self.auto_poster_loop.start()
42
43 def cog_unload(self) -> None:
44 """Stop the loop task and revoke the access token when the cog is unloaded."""
45 self.auto_poster_loop.cancel()
46 if self.access_token and self.access_token.expires_at > datetime.utcnow():
47 asyncio.create_task(self.revoke_access_token())
48
49 async def init_reddit_ready(self) -> None:
50 """Sets the reddit webhook when the cog is loaded."""
51 await self.bot.wait_until_guild_available()
52 if not self.webhook:
53 self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)
54
55 @property
56 def channel(self) -> TextChannel:
57 """Get the #reddit channel object from the bot's cache."""
58 return self.bot.get_channel(Channels.reddit)
59
60 async def get_access_token(self) -> None:
61 """
62 Get a Reddit API OAuth2 access token and assign it to self.access_token.
63
64 A token is valid for 1 hour. There will be MAX_RETRIES to get a token, after which the cog
65 will be unloaded and a ClientError raised if retrieval was still unsuccessful.
66 """
67 for i in range(1, self.MAX_RETRIES + 1):
68 response = await self.bot.http_session.post(
69 url=f"{self.URL}/api/v1/access_token",
70 headers=self.HEADERS,
71 auth=self.client_auth,
72 data={
73 "grant_type": "client_credentials",
74 "duration": "temporary"
75 }
76 )
77
78 if response.status == 200 and response.content_type == "application/json":
79 content = await response.json()
80 expiration = int(content["expires_in"]) - 60 # Subtract 1 minute for leeway.
81 self.access_token = AccessToken(
82 token=content["access_token"],
83 expires_at=datetime.utcnow() + timedelta(seconds=expiration)
84 )
85
86 log.debug(f"New token acquired; expires on UTC {self.access_token.expires_at}")
87 return
88 else:
89 log.debug(
90 f"Failed to get an access token: "
91 f"status {response.status} & content type {response.content_type}; "
92 f"retrying ({i}/{self.MAX_RETRIES})"
93 )
94
95 await asyncio.sleep(3)
96
97 self.bot.remove_cog(self.qualified_name)
98 raise ClientError("Authentication with the Reddit API failed. Unloading the cog.")
99
100 async def revoke_access_token(self) -> None:
101 """
102 Revoke the OAuth2 access token for the Reddit API.
103
104 For security reasons, it's good practice to revoke the token when it's no longer being used.
105 """
106 response = await self.bot.http_session.post(
107 url=f"{self.URL}/api/v1/revoke_token",
108 headers=self.HEADERS,
109 auth=self.client_auth,
110 data={
111 "token": self.access_token.token,
112 "token_type_hint": "access_token"
113 }
114 )
115
116 if response.status == 204 and response.content_type == "application/json":
117 self.access_token = None
118 else:
119 log.warning(f"Unable to revoke access token: status {response.status}.")
120
121 async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:
122 """A helper method to fetch a certain amount of Reddit posts at a given route."""
123 # Reddit's JSON responses only provide 25 posts at most.
124 if not 25 >= amount > 0:
125 raise ValueError("Invalid amount of subreddit posts requested.")
126
127 # Renew the token if necessary.
128 if not self.access_token or self.access_token.expires_at < datetime.utcnow():
129 await self.get_access_token()
130
131 url = f"{self.OAUTH_URL}/{route}"
132 for _ in range(self.MAX_RETRIES):
133 response = await self.bot.http_session.get(
134 url=url,
135 headers={**self.HEADERS, "Authorization": f"bearer {self.access_token.token}"},
136 params=params
137 )
138 if response.status == 200 and response.content_type == 'application/json':
139 # Got appropriate response - process and return.
140 content = await response.json()
141 posts = content["data"]["children"]
142 return posts[:amount]
143
144 await asyncio.sleep(3)
145
146 log.debug(f"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}")
147 return list() # Failed to get appropriate response within allowed number of retries.
148
149 async def get_top_posts(self, subreddit: Subreddit, time: str = "all", amount: int = 5) -> Embed:
150 """
151 Get the top amount of posts for a given subreddit within a specified timeframe.
152
153 A time of "all" will get posts from all time, "day" will get top daily posts and "week" will get the top
154 weekly posts.
155
156 The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.
157 """
158 embed = Embed(description="")
159
160 posts = await self.fetch_posts(
161 route=f"{subreddit}/top",
162 amount=amount,
163 params={"t": time}
164 )
165
166 if not posts:
167 embed.title = random.choice(ERROR_REPLIES)
168 embed.colour = Colour.red()
169 embed.description = (
170 "Sorry! We couldn't find any posts from that subreddit. "
171 "If this problem persists, please let us know."
172 )
173
174 return embed
175
176 for post in posts:
177 data = post["data"]
178
179 text = data["selftext"]
180 if text:
181 text = textwrap.shorten(text, width=128, placeholder="...")
182 text += "\n" # Add newline to separate embed info
183
184 ups = data["ups"]
185 comments = data["num_comments"]
186 author = data["author"]
187
188 title = textwrap.shorten(data["title"], width=64, placeholder="...")
189 link = self.URL + data["permalink"]
190
191 embed.description += (
192 f"**[{title}]({link})**\n"
193 f"{text}"
194 f"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\n\n"
195 )
196
197 embed.colour = Colour.blurple()
198 return embed
199
200 @loop()
201 async def auto_poster_loop(self) -> None:
202 """Post the top 5 posts daily, and the top 5 posts weekly."""
203 # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter
204 now = datetime.utcnow()
205 tomorrow = now + timedelta(days=1)
206 midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)
207 seconds_until = (midnight_tomorrow - now).total_seconds()
208
209 await asyncio.sleep(seconds_until)
210
211 await self.bot.wait_until_guild_available()
212 if not self.webhook:
213 await self.bot.fetch_webhook(Webhooks.reddit)
214
215 if datetime.utcnow().weekday() == 0:
216 await self.top_weekly_posts()
217 # if it's a monday send the top weekly posts
218
219 for subreddit in RedditConfig.subreddits:
220 top_posts = await self.get_top_posts(subreddit=subreddit, time="day")
221 await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
222
223 async def top_weekly_posts(self) -> None:
224 """Post a summary of the top posts."""
225 for subreddit in RedditConfig.subreddits:
226 # Send and pin the new weekly posts.
227 top_posts = await self.get_top_posts(subreddit=subreddit, time="week")
228
229 message = await self.webhook.send(wait=True, username=f"{subreddit} Top Weekly Posts", embed=top_posts)
230
231 if subreddit.lower() == "r/python":
232 if not self.channel:
233 log.warning("Failed to get #reddit channel to remove pins in the weekly loop.")
234 return
235
236 # Remove the oldest pins so that only 12 remain at most.
237 pins = await self.channel.pins()
238
239 while len(pins) >= 12:
240 await pins[-1].unpin()
241 del pins[-1]
242
243 await message.pin()
244
245 @group(name="reddit", invoke_without_command=True)
246 async def reddit_group(self, ctx: Context) -> None:
247 """View the top posts from various subreddits."""
248 await ctx.send_help(ctx.command)
249
250 @reddit_group.command(name="top")
251 async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
252 """Send the top posts of all time from a given subreddit."""
253 async with ctx.typing():
254 embed = await self.get_top_posts(subreddit=subreddit, time="all")
255
256 await ctx.send(content=f"Here are the top {subreddit} posts of all time!", embed=embed)
257
258 @reddit_group.command(name="daily")
259 async def daily_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
260 """Send the top posts of today from a given subreddit."""
261 async with ctx.typing():
262 embed = await self.get_top_posts(subreddit=subreddit, time="day")
263
264 await ctx.send(content=f"Here are today's top {subreddit} posts!", embed=embed)
265
266 @reddit_group.command(name="weekly")
267 async def weekly_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
268 """Send the top posts of this week from a given subreddit."""
269 async with ctx.typing():
270 embed = await self.get_top_posts(subreddit=subreddit, time="week")
271
272 await ctx.send(content=f"Here are this week's top {subreddit} posts!", embed=embed)
273
274 @with_role(*STAFF_ROLES)
275 @reddit_group.command(name="subreddits", aliases=("subs",))
276 async def subreddits_command(self, ctx: Context) -> None:
277 """Send a paginated embed of all the subreddits we're relaying."""
278 embed = Embed()
279 embed.title = "Relayed subreddits."
280 embed.colour = Colour.blurple()
281
282 await LinePaginator.paginate(
283 RedditConfig.subreddits,
284 ctx, embed,
285 footer_text="Use the reddit commands along with these to view their posts.",
286 empty=False,
287 max_lines=15
288 )
289
290
291 def setup(bot: Bot) -> None:
292 """Load the Reddit cog."""
293 if not RedditConfig.secret or not RedditConfig.client_id:
294 log.error("Credentials not provided, cog not loaded.")
295 return
296 bot.add_cog(Reddit(bot))
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py
--- a/bot/cogs/reddit.py
+++ b/bot/cogs/reddit.py
@@ -218,7 +218,10 @@
for subreddit in RedditConfig.subreddits:
top_posts = await self.get_top_posts(subreddit=subreddit, time="day")
- await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
+ message = await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
+
+ if message.channel.is_news():
+ await message.publish()
async def top_weekly_posts(self) -> None:
"""Post a summary of the top posts."""
@@ -242,6 +245,9 @@
await message.pin()
+ if message.channel.is_news():
+ await message.publish()
+
@group(name="reddit", invoke_without_command=True)
async def reddit_group(self, ctx: Context) -> None:
"""View the top posts from various subreddits."""
| {"golden_diff": "diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py\n--- a/bot/cogs/reddit.py\n+++ b/bot/cogs/reddit.py\n@@ -218,7 +218,10 @@\n \n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n- await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n+ message = await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n+\n+ if message.channel.is_news():\n+ await message.publish()\n \n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n@@ -242,6 +245,9 @@\n \n await message.pin()\n \n+ if message.channel.is_news():\n+ await message.publish()\n+\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n", "issue": "Publish messages to the #reddit channel\nSince the #reddit channel has moved to the new News category we should convert it to an announcement channel and use the `.publish()` method on messages sent there so users can subscribe to daily reddit updates.\r\n\r\n[Message.publish() in the discord.py docs](https://discordpy.readthedocs.io/en/latest/api.html#discord.Message.publish)\n", "before_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom aiohttp import BasicAuth, ClientError\nfrom discord import Colour, Embed, TextChannel\nfrom discord.ext.commands import Cog, Context, group\nfrom discord.ext.tasks import loop\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks\nfrom bot.converters import Subreddit\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nAccessToken = namedtuple(\"AccessToken\", [\"token\", \"expires_at\"])\n\n\nclass Reddit(Cog):\n \"\"\"Track subreddit posts and show detailed statistics about them.\"\"\"\n\n HEADERS = {\"User-Agent\": \"python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)\"}\n URL = \"https://www.reddit.com\"\n OAUTH_URL = \"https://oauth.reddit.com\"\n MAX_RETRIES = 3\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.webhook = None\n self.access_token = None\n self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)\n\n bot.loop.create_task(self.init_reddit_ready())\n self.auto_poster_loop.start()\n\n def cog_unload(self) -> None:\n \"\"\"Stop the loop task and revoke the access token when the cog is unloaded.\"\"\"\n self.auto_poster_loop.cancel()\n if self.access_token and self.access_token.expires_at > datetime.utcnow():\n asyncio.create_task(self.revoke_access_token())\n\n async def init_reddit_ready(self) -> None:\n \"\"\"Sets the reddit webhook when the cog is loaded.\"\"\"\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)\n\n @property\n def channel(self) -> TextChannel:\n \"\"\"Get the #reddit channel object from the bot's cache.\"\"\"\n return self.bot.get_channel(Channels.reddit)\n\n async def get_access_token(self) -> None:\n \"\"\"\n Get a Reddit API OAuth2 access token and assign it to self.access_token.\n\n A token is valid for 1 hour. 
There will be MAX_RETRIES to get a token, after which the cog\n will be unloaded and a ClientError raised if retrieval was still unsuccessful.\n \"\"\"\n for i in range(1, self.MAX_RETRIES + 1):\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/access_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"grant_type\": \"client_credentials\",\n \"duration\": \"temporary\"\n }\n )\n\n if response.status == 200 and response.content_type == \"application/json\":\n content = await response.json()\n expiration = int(content[\"expires_in\"]) - 60 # Subtract 1 minute for leeway.\n self.access_token = AccessToken(\n token=content[\"access_token\"],\n expires_at=datetime.utcnow() + timedelta(seconds=expiration)\n )\n\n log.debug(f\"New token acquired; expires on UTC {self.access_token.expires_at}\")\n return\n else:\n log.debug(\n f\"Failed to get an access token: \"\n f\"status {response.status} & content type {response.content_type}; \"\n f\"retrying ({i}/{self.MAX_RETRIES})\"\n )\n\n await asyncio.sleep(3)\n\n self.bot.remove_cog(self.qualified_name)\n raise ClientError(\"Authentication with the Reddit API failed. Unloading the cog.\")\n\n async def revoke_access_token(self) -> None:\n \"\"\"\n Revoke the OAuth2 access token for the Reddit API.\n\n For security reasons, it's good practice to revoke the token when it's no longer being used.\n \"\"\"\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/revoke_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"token\": self.access_token.token,\n \"token_type_hint\": \"access_token\"\n }\n )\n\n if response.status == 204 and response.content_type == \"application/json\":\n self.access_token = None\n else:\n log.warning(f\"Unable to revoke access token: status {response.status}.\")\n\n async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n \"\"\"A helper method to fetch a certain amount of Reddit posts at a given route.\"\"\"\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n # Renew the token if necessary.\n if not self.access_token or self.access_token.expires_at < datetime.utcnow():\n await self.get_access_token()\n\n url = f\"{self.OAUTH_URL}/{route}\"\n for _ in range(self.MAX_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers={**self.HEADERS, \"Authorization\": f\"bearer {self.access_token.token}\"},\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.\n\n async def get_top_posts(self, subreddit: Subreddit, time: str = \"all\", amount: int = 5) -> Embed:\n \"\"\"\n Get the top amount of posts for a given subreddit within a specified timeframe.\n\n A time of \"all\" will get posts from all time, \"day\" will get top daily posts and \"week\" will get the top\n weekly posts.\n\n The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.\n \"\"\"\n embed = Embed(description=\"\")\n\n posts = await self.fetch_posts(\n 
route=f\"{subreddit}/top\",\n amount=amount,\n params={\"t\": time}\n )\n\n if not posts:\n embed.title = random.choice(ERROR_REPLIES)\n embed.colour = Colour.red()\n embed.description = (\n \"Sorry! We couldn't find any posts from that subreddit. \"\n \"If this problem persists, please let us know.\"\n )\n\n return embed\n\n for post in posts:\n data = post[\"data\"]\n\n text = data[\"selftext\"]\n if text:\n text = textwrap.shorten(text, width=128, placeholder=\"...\")\n text += \"\\n\" # Add newline to separate embed info\n\n ups = data[\"ups\"]\n comments = data[\"num_comments\"]\n author = data[\"author\"]\n\n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n link = self.URL + data[\"permalink\"]\n\n embed.description += (\n f\"**[{title}]({link})**\\n\"\n f\"{text}\"\n f\"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\\n\\n\"\n )\n\n embed.colour = Colour.blurple()\n return embed\n\n @loop()\n async def auto_poster_loop(self) -> None:\n \"\"\"Post the top 5 posts daily, and the top 5 posts weekly.\"\"\"\n # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter\n now = datetime.utcnow()\n tomorrow = now + timedelta(days=1)\n midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)\n seconds_until = (midnight_tomorrow - now).total_seconds()\n\n await asyncio.sleep(seconds_until)\n\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n await self.bot.fetch_webhook(Webhooks.reddit)\n\n if datetime.utcnow().weekday() == 0:\n await self.top_weekly_posts()\n # if it's a monday send the top weekly posts\n\n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n\n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n for subreddit in RedditConfig.subreddits:\n # Send and pin the new weekly posts.\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n message = await self.webhook.send(wait=True, username=f\"{subreddit} Top Weekly Posts\", embed=top_posts)\n\n if subreddit.lower() == \"r/python\":\n if not self.channel:\n log.warning(\"Failed to get #reddit channel to remove pins in the weekly loop.\")\n return\n\n # Remove the oldest pins so that only 12 remain at most.\n pins = await self.channel.pins()\n\n while len(pins) >= 12:\n await pins[-1].unpin()\n del pins[-1]\n\n await message.pin()\n\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n await ctx.send_help(ctx.command)\n\n @reddit_group.command(name=\"top\")\n async def top_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of all time from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"all\")\n\n await ctx.send(content=f\"Here are the top {subreddit} posts of all time!\", embed=embed)\n\n @reddit_group.command(name=\"daily\")\n async def daily_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of today from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n\n await ctx.send(content=f\"Here are today's top {subreddit} posts!\", embed=embed)\n\n 
@reddit_group.command(name=\"weekly\")\n async def weekly_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of this week from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n await ctx.send(content=f\"Here are this week's top {subreddit} posts!\", embed=embed)\n\n @with_role(*STAFF_ROLES)\n @reddit_group.command(name=\"subreddits\", aliases=(\"subs\",))\n async def subreddits_command(self, ctx: Context) -> None:\n \"\"\"Send a paginated embed of all the subreddits we're relaying.\"\"\"\n embed = Embed()\n embed.title = \"Relayed subreddits.\"\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n RedditConfig.subreddits,\n ctx, embed,\n footer_text=\"Use the reddit commands along with these to view their posts.\",\n empty=False,\n max_lines=15\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Reddit cog.\"\"\"\n if not RedditConfig.secret or not RedditConfig.client_id:\n log.error(\"Credentials not provided, cog not loaded.\")\n return\n bot.add_cog(Reddit(bot))\n", "path": "bot/cogs/reddit.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom aiohttp import BasicAuth, ClientError\nfrom discord import Colour, Embed, TextChannel\nfrom discord.ext.commands import Cog, Context, group\nfrom discord.ext.tasks import loop\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks\nfrom bot.converters import Subreddit\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nAccessToken = namedtuple(\"AccessToken\", [\"token\", \"expires_at\"])\n\n\nclass Reddit(Cog):\n \"\"\"Track subreddit posts and show detailed statistics about them.\"\"\"\n\n HEADERS = {\"User-Agent\": \"python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)\"}\n URL = \"https://www.reddit.com\"\n OAUTH_URL = \"https://oauth.reddit.com\"\n MAX_RETRIES = 3\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.webhook = None\n self.access_token = None\n self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)\n\n bot.loop.create_task(self.init_reddit_ready())\n self.auto_poster_loop.start()\n\n def cog_unload(self) -> None:\n \"\"\"Stop the loop task and revoke the access token when the cog is unloaded.\"\"\"\n self.auto_poster_loop.cancel()\n if self.access_token and self.access_token.expires_at > datetime.utcnow():\n asyncio.create_task(self.revoke_access_token())\n\n async def init_reddit_ready(self) -> None:\n \"\"\"Sets the reddit webhook when the cog is loaded.\"\"\"\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)\n\n @property\n def channel(self) -> TextChannel:\n \"\"\"Get the #reddit channel object from the bot's cache.\"\"\"\n return self.bot.get_channel(Channels.reddit)\n\n async def get_access_token(self) -> None:\n \"\"\"\n Get a Reddit API OAuth2 access token and assign it to self.access_token.\n\n A token is valid for 1 hour. 
There will be MAX_RETRIES to get a token, after which the cog\n will be unloaded and a ClientError raised if retrieval was still unsuccessful.\n \"\"\"\n for i in range(1, self.MAX_RETRIES + 1):\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/access_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"grant_type\": \"client_credentials\",\n \"duration\": \"temporary\"\n }\n )\n\n if response.status == 200 and response.content_type == \"application/json\":\n content = await response.json()\n expiration = int(content[\"expires_in\"]) - 60 # Subtract 1 minute for leeway.\n self.access_token = AccessToken(\n token=content[\"access_token\"],\n expires_at=datetime.utcnow() + timedelta(seconds=expiration)\n )\n\n log.debug(f\"New token acquired; expires on UTC {self.access_token.expires_at}\")\n return\n else:\n log.debug(\n f\"Failed to get an access token: \"\n f\"status {response.status} & content type {response.content_type}; \"\n f\"retrying ({i}/{self.MAX_RETRIES})\"\n )\n\n await asyncio.sleep(3)\n\n self.bot.remove_cog(self.qualified_name)\n raise ClientError(\"Authentication with the Reddit API failed. Unloading the cog.\")\n\n async def revoke_access_token(self) -> None:\n \"\"\"\n Revoke the OAuth2 access token for the Reddit API.\n\n For security reasons, it's good practice to revoke the token when it's no longer being used.\n \"\"\"\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/revoke_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"token\": self.access_token.token,\n \"token_type_hint\": \"access_token\"\n }\n )\n\n if response.status == 204 and response.content_type == \"application/json\":\n self.access_token = None\n else:\n log.warning(f\"Unable to revoke access token: status {response.status}.\")\n\n async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n \"\"\"A helper method to fetch a certain amount of Reddit posts at a given route.\"\"\"\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n # Renew the token if necessary.\n if not self.access_token or self.access_token.expires_at < datetime.utcnow():\n await self.get_access_token()\n\n url = f\"{self.OAUTH_URL}/{route}\"\n for _ in range(self.MAX_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers={**self.HEADERS, \"Authorization\": f\"bearer {self.access_token.token}\"},\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.\n\n async def get_top_posts(self, subreddit: Subreddit, time: str = \"all\", amount: int = 5) -> Embed:\n \"\"\"\n Get the top amount of posts for a given subreddit within a specified timeframe.\n\n A time of \"all\" will get posts from all time, \"day\" will get top daily posts and \"week\" will get the top\n weekly posts.\n\n The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.\n \"\"\"\n embed = Embed(description=\"\")\n\n posts = await self.fetch_posts(\n 
route=f\"{subreddit}/top\",\n amount=amount,\n params={\"t\": time}\n )\n\n if not posts:\n embed.title = random.choice(ERROR_REPLIES)\n embed.colour = Colour.red()\n embed.description = (\n \"Sorry! We couldn't find any posts from that subreddit. \"\n \"If this problem persists, please let us know.\"\n )\n\n return embed\n\n for post in posts:\n data = post[\"data\"]\n\n text = data[\"selftext\"]\n if text:\n text = textwrap.shorten(text, width=128, placeholder=\"...\")\n text += \"\\n\" # Add newline to separate embed info\n\n ups = data[\"ups\"]\n comments = data[\"num_comments\"]\n author = data[\"author\"]\n\n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n link = self.URL + data[\"permalink\"]\n\n embed.description += (\n f\"**[{title}]({link})**\\n\"\n f\"{text}\"\n f\"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\\n\\n\"\n )\n\n embed.colour = Colour.blurple()\n return embed\n\n @loop()\n async def auto_poster_loop(self) -> None:\n \"\"\"Post the top 5 posts daily, and the top 5 posts weekly.\"\"\"\n # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter\n now = datetime.utcnow()\n tomorrow = now + timedelta(days=1)\n midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)\n seconds_until = (midnight_tomorrow - now).total_seconds()\n\n await asyncio.sleep(seconds_until)\n\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n await self.bot.fetch_webhook(Webhooks.reddit)\n\n if datetime.utcnow().weekday() == 0:\n await self.top_weekly_posts()\n # if it's a monday send the top weekly posts\n\n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n message = await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n\n if message.channel.is_news():\n await message.publish()\n\n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n for subreddit in RedditConfig.subreddits:\n # Send and pin the new weekly posts.\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n message = await self.webhook.send(wait=True, username=f\"{subreddit} Top Weekly Posts\", embed=top_posts)\n\n if subreddit.lower() == \"r/python\":\n if not self.channel:\n log.warning(\"Failed to get #reddit channel to remove pins in the weekly loop.\")\n return\n\n # Remove the oldest pins so that only 12 remain at most.\n pins = await self.channel.pins()\n\n while len(pins) >= 12:\n await pins[-1].unpin()\n del pins[-1]\n\n await message.pin()\n\n if message.channel.is_news():\n await message.publish()\n\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n await ctx.send_help(ctx.command)\n\n @reddit_group.command(name=\"top\")\n async def top_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of all time from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"all\")\n\n await ctx.send(content=f\"Here are the top {subreddit} posts of all time!\", embed=embed)\n\n @reddit_group.command(name=\"daily\")\n async def daily_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of today from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await 
self.get_top_posts(subreddit=subreddit, time=\"day\")\n\n await ctx.send(content=f\"Here are today's top {subreddit} posts!\", embed=embed)\n\n @reddit_group.command(name=\"weekly\")\n async def weekly_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of this week from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n await ctx.send(content=f\"Here are this week's top {subreddit} posts!\", embed=embed)\n\n @with_role(*STAFF_ROLES)\n @reddit_group.command(name=\"subreddits\", aliases=(\"subs\",))\n async def subreddits_command(self, ctx: Context) -> None:\n \"\"\"Send a paginated embed of all the subreddits we're relaying.\"\"\"\n embed = Embed()\n embed.title = \"Relayed subreddits.\"\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n RedditConfig.subreddits,\n ctx, embed,\n footer_text=\"Use the reddit commands along with these to view their posts.\",\n empty=False,\n max_lines=15\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Reddit cog.\"\"\"\n if not RedditConfig.secret or not RedditConfig.client_id:\n log.error(\"Credentials not provided, cog not loaded.\")\n return\n bot.add_cog(Reddit(bot))\n", "path": "bot/cogs/reddit.py"}]} | 3,705 | 240 |
gh_patches_debug_10275 | rasdani/github-patches | git_diff | mindsdb__lightwood-524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`sktime` missing dependency
In requirements.txt
`sktime[arima] >= 0.5.0`
but
`(mindsdb) C:\Users\User>pip install sktime[arima]>=0.5.0
WARNING: sktime 0.7.0 does not provide the extra 'arima'`
install
`(mindsdb) C:\Users\User>pip install sktime[arima]==0.5.0`
--- END ISSUE ---
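For context on the warning quoted above: the `arima` extra referenced in the pin appears to be how older sktime releases pulled in their ARIMA backend. A minimal sketch of declaring that backend explicitly, so the pin no longer depends on an extra that sktime 0.7.0 does not define, is shown below; the package name `pmdarima` and the version bounds are illustrative assumptions, not requirements taken from this repository.
```python
# Sketch only: depend on the ARIMA backend directly instead of the 'arima'
# extra. Version bounds are illustrative assumptions, not the repository's pins.
install_requires = [
    "sktime>=0.5.0",    # provides sktime.forecasting.arima.AutoARIMA
    "pmdarima>=1.8.0",  # the auto-ARIMA implementation that sktime wraps
]
```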
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/model/sktime.py`
Content:
```
1 import numpy as np
2 import pandas as pd
3 from typing import Dict, List, Union
4 from sktime.forecasting.arima import AutoARIMA
5
6 from lightwood.api import dtype
7 from lightwood.helpers.log import log
8 from lightwood.model.base import BaseModel
9 from lightwood.encoder.time_series.helpers.common import get_group_matches
10 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs
11
12
13 class SkTime(BaseModel):
14 forecaster: str
15 n_ts_predictions: int
16 target: str
17 supports_proba: bool
18
19 def __init__(
20 self, stop_after: int, target: str, dtype_dict: Dict[str, str],
21 n_ts_predictions: int, ts_analysis: Dict):
22 super().__init__(stop_after)
23 self.target = target
24 dtype_dict[target] = dtype.float
25 self.model_class = AutoARIMA
26 self.models = {}
27 self.n_ts_predictions = n_ts_predictions
28 self.ts_analysis = ts_analysis
29 self.forecasting_horizon = np.arange(1, self.n_ts_predictions)
30 self.cutoff_index = {} # marks index at which training data stops and forecasting window starts
31 self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by
32 self.supports_proba = False
33 self.stable = True
34
35 def fit(self, ds_arr: List[EncodedDs]) -> None:
36 log.info('Started fitting sktime forecaster for array prediction')
37
38 all_folds = ConcatedEncodedDs(ds_arr)
39 df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis["tss"].order_by[0]}')
40 data = {'data': df[self.target],
41 'group_info': {gcol: df[gcol].tolist()
42 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
43
44 for group in self.ts_analysis['group_combinations']:
45 self.models[group] = self.model_class()
46
47 if self.grouped_by == ['__default']:
48 series_idxs = data['data'].index
49 series_data = data['data'].values
50 else:
51 series_idxs, series_data = get_group_matches(data, group)
52
53 if series_data.size > 0:
54 series = pd.Series(series_data.squeeze(), index=series_idxs)
55 series = series.sort_index(ascending=True)
56 series = series.reset_index(drop=True)
57 try:
58 self.models[group].fit(series)
59 except ValueError:
60 self.models[group] = self.model_class(deseasonalize=False)
61 self.models[group].fit(series)
62
63 self.cutoff_index[group] = len(series)
64
65 if self.grouped_by == ['__default']:
66 break
67
68 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = False) -> pd.DataFrame:
69 if predict_proba:
70 log.warning('This model does not output probability estimates')
71
72 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)
73 ydf = pd.DataFrame(0, # zero-filled
74 index=np.arange(length),
75 columns=['prediction'],
76 dtype=object)
77
78 data = {'data': ds.data_frame[self.target].reset_index(drop=True),
79 'group_info': {gcol: ds.data_frame[gcol].tolist()
80 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
81
82 # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder
83
84 for group in self.ts_analysis['group_combinations']:
85
86 if self.grouped_by == ['__default']:
87 series_idxs = data['data'].index
88 series_data = data['data'].values
89 else:
90 series_idxs, series_data = get_group_matches(data, group)
91
92 if series_data.size > 0:
93 series = pd.Series(series_data.squeeze(), index=series_idxs)
94 series = series.sort_index(ascending=True)
95 series = series.reset_index(drop=True)
96
97 for idx, _ in enumerate(series.iteritems()):
98 ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(
99 np.arange(idx, # +cutoff
100 idx + self.n_ts_predictions)).tolist() # +cutoff
101
102 if self.grouped_by == ['__default']:
103 break
104
105 return ydf[['prediction']]
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py
--- a/lightwood/model/sktime.py
+++ b/lightwood/model/sktime.py
@@ -42,7 +42,8 @@
for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
for group in self.ts_analysis['group_combinations']:
- self.models[group] = self.model_class()
+ # many warnings might be thrown inside of statsmodels during stepwise procedure
+ self.models[group] = self.model_class(suppress_warnings=True)
if self.grouped_by == ['__default']:
series_idxs = data['data'].index
| {"golden_diff": "diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py\n--- a/lightwood/model/sktime.py\n+++ b/lightwood/model/sktime.py\n@@ -42,7 +42,8 @@\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n \n for group in self.ts_analysis['group_combinations']:\n- self.models[group] = self.model_class()\n+ # many warnings might be thrown inside of statsmodels during stepwise procedure\n+ self.models[group] = self.model_class(suppress_warnings=True)\n \n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n", "issue": "`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Union\nfrom sktime.forecasting.arima import AutoARIMA\n\nfrom lightwood.api import dtype\nfrom lightwood.helpers.log import log\nfrom lightwood.model.base import BaseModel\nfrom lightwood.encoder.time_series.helpers.common import get_group_matches\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass SkTime(BaseModel):\n forecaster: str\n n_ts_predictions: int\n target: str\n supports_proba: bool\n\n def __init__(\n self, stop_after: int, target: str, dtype_dict: Dict[str, str],\n n_ts_predictions: int, ts_analysis: Dict):\n super().__init__(stop_after)\n self.target = target\n dtype_dict[target] = dtype.float\n self.model_class = AutoARIMA\n self.models = {}\n self.n_ts_predictions = n_ts_predictions\n self.ts_analysis = ts_analysis\n self.forecasting_horizon = np.arange(1, self.n_ts_predictions)\n self.cutoff_index = {} # marks index at which training data stops and forecasting window starts\n self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n\n def fit(self, ds_arr: List[EncodedDs]) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n\n all_folds = ConcatedEncodedDs(ds_arr)\n df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis[\"tss\"].order_by[0]}')\n data = {'data': df[self.target],\n 'group_info': {gcol: df[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n for group in self.ts_analysis['group_combinations']:\n self.models[group] = self.model_class()\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n try:\n self.models[group].fit(series)\n except ValueError:\n self.models[group] = self.model_class(deseasonalize=False)\n self.models[group].fit(series)\n\n self.cutoff_index[group] = len(series)\n\n if self.grouped_by == ['__default']:\n break\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = 
False) -> pd.DataFrame:\n if predict_proba:\n log.warning('This model does not output probability estimates')\n\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=['prediction'],\n dtype=object)\n\n data = {'data': ds.data_frame[self.target].reset_index(drop=True),\n 'group_info': {gcol: ds.data_frame[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder\n\n for group in self.ts_analysis['group_combinations']:\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n\n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(\n np.arange(idx, # +cutoff\n idx + self.n_ts_predictions)).tolist() # +cutoff\n\n if self.grouped_by == ['__default']:\n break\n\n return ydf[['prediction']]\n", "path": "lightwood/model/sktime.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Union\nfrom sktime.forecasting.arima import AutoARIMA\n\nfrom lightwood.api import dtype\nfrom lightwood.helpers.log import log\nfrom lightwood.model.base import BaseModel\nfrom lightwood.encoder.time_series.helpers.common import get_group_matches\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass SkTime(BaseModel):\n forecaster: str\n n_ts_predictions: int\n target: str\n supports_proba: bool\n\n def __init__(\n self, stop_after: int, target: str, dtype_dict: Dict[str, str],\n n_ts_predictions: int, ts_analysis: Dict):\n super().__init__(stop_after)\n self.target = target\n dtype_dict[target] = dtype.float\n self.model_class = AutoARIMA\n self.models = {}\n self.n_ts_predictions = n_ts_predictions\n self.ts_analysis = ts_analysis\n self.forecasting_horizon = np.arange(1, self.n_ts_predictions)\n self.cutoff_index = {} # marks index at which training data stops and forecasting window starts\n self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n\n def fit(self, ds_arr: List[EncodedDs]) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n\n all_folds = ConcatedEncodedDs(ds_arr)\n df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis[\"tss\"].order_by[0]}')\n data = {'data': df[self.target],\n 'group_info': {gcol: df[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n for group in self.ts_analysis['group_combinations']:\n # many warnings might be thrown inside of statsmodels during stepwise procedure\n self.models[group] = self.model_class(suppress_warnings=True)\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n try:\n self.models[group].fit(series)\n 
except ValueError:\n self.models[group] = self.model_class(deseasonalize=False)\n self.models[group].fit(series)\n\n self.cutoff_index[group] = len(series)\n\n if self.grouped_by == ['__default']:\n break\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = False) -> pd.DataFrame:\n if predict_proba:\n log.warning('This model does not output probability estimates')\n\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=['prediction'],\n dtype=object)\n\n data = {'data': ds.data_frame[self.target].reset_index(drop=True),\n 'group_info': {gcol: ds.data_frame[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder\n\n for group in self.ts_analysis['group_combinations']:\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n\n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(\n np.arange(idx, # +cutoff\n idx + self.n_ts_predictions)).tolist() # +cutoff\n\n if self.grouped_by == ['__default']:\n break\n\n return ydf[['prediction']]\n", "path": "lightwood/model/sktime.py"}]} | 1,635 | 154 |
gh_patches_debug_26823 | rasdani/github-patches | git_diff | ydataai__ydata-profiling-67 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add badges for "highly skewed", "zeros"
I enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas_profiling/templates.py`
Content:
```
1 # coding=UTF-8
2
3 '''This file contains all templates used for generating the HTML profile report'''
4
5 from jinja2 import Environment, PackageLoader
6
7 # Initializing Jinja
8 pl = PackageLoader('pandas_profiling', 'templates')
9 jinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)
10
11 # Mapping between template name and file
12 templates = {'freq_table_row': 'freq_table_row.html',
13 'mini_freq_table_row': 'mini_freq_table_row.html',
14 'freq_table': 'freq_table.html',
15 'mini_freq_table': 'mini_freq_table.html',
16 'row_num': 'row_num.html',
17 'row_date': 'row_date.html',
18 'row_cat': 'row_cat.html',
19 'row_bool': 'row_bool.html',
20 'row_corr': 'row_corr.html',
21 'row_recoded': 'row_recoded.html',
22 'row_const': 'row_const.html',
23 'row_unique': 'row_unique.html',
24 'overview': 'overview.html',
25 'sample': 'sample.html',
26 'base': 'base.html',
27 'wrapper': 'wrapper.html'
28 }
29
30 # Mapping between row type and var type
31 var_type = {'NUM': 'Numeric',
32 'DATE': 'Date',
33 'CAT': 'Categorical',
34 'UNIQUE': 'Categorical, Unique',
35 'BOOL': 'Boolean',
36 'CONST': 'Constant',
37 'CORR': 'Highly correlated',
38 'RECODED': 'Recoded'
39 }
40
41
42 def template(template_name):
43 """Return a jinja template ready for rendering. If needed, global variables are initialized.
44
45 Parameters
46 ----------
47 template_name: str, the name of the template as defined in the templates mapping
48
49 Returns
50 -------
51 The Jinja template ready for rendering
52 """
53 globals = None
54 if template_name.startswith('row_'):
55 # This is a row template setting global variable
56 globals = dict()
57 globals['vartype'] = var_type[template_name.split('_')[1].upper()]
58 return jinja2_env.get_template(templates[template_name], globals=globals)
59
60
61 # mapping between row type and template name
62 row_templates_dict = {'NUM': template('row_num'),
63 'DATE': template('row_date'),
64 'DISCRETE': template('row_num'),
65 'CAT': template('row_cat'),
66 'BOOL': template('row_bool'),
67 'UNIQUE': template('row_unique'),
68 'CONST': template('row_const'),
69 'CORR': template('row_corr'),
70 'RECODED': template('row_recoded')
71 }
72
73 # The number of column to use in the display of the frequency table according to the category
74 mini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}
75
76 messages = dict()
77 messages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class="label label-primary">Rejected</span>'
78 messages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (ρ = {0[correlation]}) <span class="label label-primary">Rejected</span>'
79 messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>'
80 messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
81 messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
82 messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})'
83 messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
84 messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
85 messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'
86
87 message_row = u'<li>{message}</li>'
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py
--- a/pandas_profiling/templates.py
+++ b/pandas_profiling/templates.py
@@ -79,9 +79,9 @@
messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>'
messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
-messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})'
+messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]}) <span class="label label-info">Skewed</span>'
messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'
+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class="label label-info">Zeros</span>'
message_row = u'<li>{message}</li>'
| {"golden_diff": "diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py\n--- a/pandas_profiling/templates.py\n+++ b/pandas_profiling/templates.py\n@@ -79,9 +79,9 @@\n messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\n messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\n messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\n-messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\n+messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]}) <span class=\"label label-info\">Skewed</span>'\n messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\n messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\n-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class=\"label label-info\">Zeros</span>'\n \n message_row = u'<li>{message}</li>'\n", "issue": "Add badges for \"highly skewed\", \"zeros\"\nI enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere.\n", "before_files": [{"content": "# coding=UTF-8\n\n'''This file contains all templates used for generating the HTML profile report'''\n\nfrom jinja2 import Environment, PackageLoader\n\n# Initializing Jinja\npl = PackageLoader('pandas_profiling', 'templates')\njinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)\n\n# Mapping between template name and file\ntemplates = {'freq_table_row': 'freq_table_row.html',\n 'mini_freq_table_row': 'mini_freq_table_row.html',\n 'freq_table': 'freq_table.html',\n 'mini_freq_table': 'mini_freq_table.html',\n 'row_num': 'row_num.html',\n 'row_date': 'row_date.html',\n 'row_cat': 'row_cat.html',\n 'row_bool': 'row_bool.html',\n 'row_corr': 'row_corr.html',\n 'row_recoded': 'row_recoded.html',\n 'row_const': 'row_const.html',\n 'row_unique': 'row_unique.html',\n 'overview': 'overview.html',\n 'sample': 'sample.html',\n 'base': 'base.html',\n 'wrapper': 'wrapper.html'\n }\n\n# Mapping between row type and var type\nvar_type = {'NUM': 'Numeric',\n 'DATE': 'Date',\n 'CAT': 'Categorical',\n 'UNIQUE': 'Categorical, Unique',\n 'BOOL': 'Boolean',\n 'CONST': 'Constant',\n 'CORR': 'Highly correlated',\n 'RECODED': 'Recoded'\n }\n\n\ndef template(template_name):\n \"\"\"Return a jinja template ready for rendering. 
If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n\n\n# mapping between row type and template name\nrow_templates_dict = {'NUM': template('row_num'),\n 'DATE': template('row_date'),\n 'DISCRETE': template('row_num'),\n 'CAT': template('row_cat'),\n 'BOOL': template('row_bool'),\n 'UNIQUE': template('row_unique'),\n 'CONST': template('row_const'),\n 'CORR': template('row_corr'),\n 'RECODED': template('row_recoded')\n }\n\n# The number of column to use in the display of the frequency table according to the category\nmini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}\n\nmessages = dict()\nmessages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class=\"label label-primary\">Rejected</span>'\nmessages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (\u03c1 = {0[correlation]}) <span class=\"label label-primary\">Rejected</span>'\nmessages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\nmessages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\nmessages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\nmessages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\nmessages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\nmessages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\nmessages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n\nmessage_row = u'<li>{message}</li>'\n", "path": "pandas_profiling/templates.py"}], "after_files": [{"content": "# coding=UTF-8\n\n'''This file contains all templates used for generating the HTML profile report'''\n\nfrom jinja2 import Environment, PackageLoader\n\n# Initializing Jinja\npl = PackageLoader('pandas_profiling', 'templates')\njinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)\n\n# Mapping between template name and file\ntemplates = {'freq_table_row': 'freq_table_row.html',\n 'mini_freq_table_row': 'mini_freq_table_row.html',\n 'freq_table': 'freq_table.html',\n 'mini_freq_table': 'mini_freq_table.html',\n 'row_num': 'row_num.html',\n 'row_date': 'row_date.html',\n 'row_cat': 'row_cat.html',\n 'row_bool': 'row_bool.html',\n 'row_corr': 'row_corr.html',\n 'row_recoded': 'row_recoded.html',\n 'row_const': 'row_const.html',\n 'row_unique': 'row_unique.html',\n 'overview': 'overview.html',\n 'sample': 'sample.html',\n 'base': 'base.html',\n 'wrapper': 'wrapper.html'\n }\n\n# Mapping between row type and var type\nvar_type = {'NUM': 'Numeric',\n 'DATE': 'Date',\n 'CAT': 'Categorical',\n 'UNIQUE': 'Categorical, Unique',\n 'BOOL': 'Boolean',\n 'CONST': 'Constant',\n 'CORR': 'Highly correlated',\n 'RECODED': 'Recoded'\n }\n\n\ndef template(template_name):\n \"\"\"Return a jinja template ready for rendering. 
If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n\n\n# mapping between row type and template name\nrow_templates_dict = {'NUM': template('row_num'),\n 'DATE': template('row_date'),\n 'DISCRETE': template('row_num'),\n 'CAT': template('row_cat'),\n 'BOOL': template('row_bool'),\n 'UNIQUE': template('row_unique'),\n 'CONST': template('row_const'),\n 'CORR': template('row_corr'),\n 'RECODED': template('row_recoded')\n }\n\n# The number of column to use in the display of the frequency table according to the category\nmini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}\n\nmessages = dict()\nmessages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class=\"label label-primary\">Rejected</span>'\nmessages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (\u03c1 = {0[correlation]}) <span class=\"label label-primary\">Rejected</span>'\nmessages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\nmessages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\nmessages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\nmessages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]}) <span class=\"label label-info\">Skewed</span>'\nmessages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\nmessages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\nmessages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class=\"label label-info\">Zeros</span>'\n\nmessage_row = u'<li>{message}</li>'\n", "path": "pandas_profiling/templates.py"}]} | 1,395 | 377 |
gh_patches_debug_29517 | rasdani/github-patches | git_diff | pypa__setuptools-2878 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate setuptools.installer but not setup_requires
> You're going to break entire workflows and distros with this, please reconsider.
I'm tempted to un-deprecate `setup_requires` as a parameter, but to deprecate/remove only the `fetch_build_eggs` part. In other words, continue to allow `setup_requires` to supply requirements for PEP 517 hooks (`get_requires_for_build_sdist` and `get_requires_for_build_wheel`).
_Originally posted by @jaraco in https://github.com/pypa/setuptools/issues/2824#issuecomment-967430754_
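Concretely, a sketch of that direction (illustrative only — inside `setuptools/installer.py` the import would be the relative `from ._deprecation_warning import ...`): the warning moves onto the egg-fetching path, and the `setup_requires`-level warning in `_install_setup_requires` goes away.

```python
import warnings

from setuptools._deprecation_warning import SetuptoolsDeprecationWarning


def fetch_build_egg(dist, req):
    """Fetch an egg needed for building (legacy, pip/wheel-backed code path)."""
    # Deprecate the fetch_build_eggs machinery itself, not the keyword.
    warnings.warn(
        "setuptools.installer is deprecated. Requirements should "
        "be satisfied by a PEP 517 installer.",
        SetuptoolsDeprecationWarning,
    )
    # ... the existing pip/wheel-based implementation continues unchanged ...
```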
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/installer.py`
Content:
```
1 import glob
2 import os
3 import subprocess
4 import sys
5 import tempfile
6 from distutils import log
7 from distutils.errors import DistutilsError
8
9 import pkg_resources
10 from setuptools.wheel import Wheel
11
12
13 def _fixup_find_links(find_links):
14 """Ensure find-links option end-up being a list of strings."""
15 if isinstance(find_links, str):
16 return find_links.split()
17 assert isinstance(find_links, (tuple, list))
18 return find_links
19
20
21 def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME
22 """Fetch an egg needed for building.
23
24 Use pip/wheel to fetch/build a wheel."""
25 # Warn if wheel is not available
26 try:
27 pkg_resources.get_distribution('wheel')
28 except pkg_resources.DistributionNotFound:
29 dist.announce('WARNING: The wheel package is not available.', log.WARN)
30 # Ignore environment markers; if supplied, it is required.
31 req = strip_marker(req)
32 # Take easy_install options into account, but do not override relevant
33 # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
34 # take precedence.
35 opts = dist.get_option_dict('easy_install')
36 if 'allow_hosts' in opts:
37 raise DistutilsError('the `allow-hosts` option is not supported '
38 'when using pip to install requirements.')
39 quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
40 if 'PIP_INDEX_URL' in os.environ:
41 index_url = None
42 elif 'index_url' in opts:
43 index_url = opts['index_url'][1]
44 else:
45 index_url = None
46 find_links = (
47 _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts
48 else []
49 )
50 if dist.dependency_links:
51 find_links.extend(dist.dependency_links)
52 eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
53 environment = pkg_resources.Environment()
54 for egg_dist in pkg_resources.find_distributions(eggs_dir):
55 if egg_dist in req and environment.can_add(egg_dist):
56 return egg_dist
57 with tempfile.TemporaryDirectory() as tmpdir:
58 cmd = [
59 sys.executable, '-m', 'pip',
60 '--disable-pip-version-check',
61 'wheel', '--no-deps',
62 '-w', tmpdir,
63 ]
64 if quiet:
65 cmd.append('--quiet')
66 if index_url is not None:
67 cmd.extend(('--index-url', index_url))
68 for link in find_links or []:
69 cmd.extend(('--find-links', link))
70 # If requirement is a PEP 508 direct URL, directly pass
71 # the URL to pip, as `req @ url` does not work on the
72 # command line.
73 cmd.append(req.url or str(req))
74 try:
75 subprocess.check_call(cmd)
76 except subprocess.CalledProcessError as e:
77 raise DistutilsError(str(e)) from e
78 wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
79 dist_location = os.path.join(eggs_dir, wheel.egg_name())
80 wheel.install_as_egg(dist_location)
81 dist_metadata = pkg_resources.PathMetadata(
82 dist_location, os.path.join(dist_location, 'EGG-INFO'))
83 dist = pkg_resources.Distribution.from_filename(
84 dist_location, metadata=dist_metadata)
85 return dist
86
87
88 def strip_marker(req):
89 """
90 Return a new requirement without the environment marker to avoid
91 calling pip with something like `babel; extra == "i18n"`, which
92 would always be ignored.
93 """
94 # create a copy to avoid mutating the input
95 req = pkg_resources.Requirement.parse(str(req))
96 req.marker = None
97 return req
98
```
Path: `setuptools/__init__.py`
Content:
```
1 """Extensions to the 'distutils' for large or complex distributions"""
2
3 from fnmatch import fnmatchcase
4 import functools
5 import os
6 import re
7 import warnings
8
9 import _distutils_hack.override # noqa: F401
10
11 import distutils.core
12 from distutils.errors import DistutilsOptionError
13 from distutils.util import convert_path
14
15 from ._deprecation_warning import SetuptoolsDeprecationWarning
16
17 import setuptools.version
18 from setuptools.extension import Extension
19 from setuptools.dist import Distribution
20 from setuptools.depends import Require
21 from . import monkey
22
23
24 __all__ = [
25 'setup',
26 'Distribution',
27 'Command',
28 'Extension',
29 'Require',
30 'SetuptoolsDeprecationWarning',
31 'find_packages',
32 'find_namespace_packages',
33 ]
34
35 __version__ = setuptools.version.__version__
36
37 bootstrap_install_from = None
38
39
40 class PackageFinder:
41 """
42 Generate a list of all Python packages found within a directory
43 """
44
45 @classmethod
46 def find(cls, where='.', exclude=(), include=('*',)):
47 """Return a list all Python packages found within directory 'where'
48
49 'where' is the root directory which will be searched for packages. It
50 should be supplied as a "cross-platform" (i.e. URL-style) path; it will
51 be converted to the appropriate local path syntax.
52
53 'exclude' is a sequence of package names to exclude; '*' can be used
54 as a wildcard in the names, such that 'foo.*' will exclude all
55 subpackages of 'foo' (but not 'foo' itself).
56
57 'include' is a sequence of package names to include. If it's
58 specified, only the named packages will be included. If it's not
59 specified, all found packages will be included. 'include' can contain
60 shell style wildcard patterns just like 'exclude'.
61 """
62
63 return list(
64 cls._find_packages_iter(
65 convert_path(where),
66 cls._build_filter('ez_setup', '*__pycache__', *exclude),
67 cls._build_filter(*include),
68 )
69 )
70
71 @classmethod
72 def _find_packages_iter(cls, where, exclude, include):
73 """
74 All the packages found in 'where' that pass the 'include' filter, but
75 not the 'exclude' filter.
76 """
77 for root, dirs, files in os.walk(where, followlinks=True):
78 # Copy dirs to iterate over it, then empty dirs.
79 all_dirs = dirs[:]
80 dirs[:] = []
81
82 for dir in all_dirs:
83 full_path = os.path.join(root, dir)
84 rel_path = os.path.relpath(full_path, where)
85 package = rel_path.replace(os.path.sep, '.')
86
87 # Skip directory trees that are not valid packages
88 if '.' in dir or not cls._looks_like_package(full_path):
89 continue
90
91 # Should this package be included?
92 if include(package) and not exclude(package):
93 yield package
94
95 # Keep searching subdirectories, as there may be more packages
96 # down there, even if the parent was excluded.
97 dirs.append(dir)
98
99 @staticmethod
100 def _looks_like_package(path):
101 """Does a directory look like a package?"""
102 return os.path.isfile(os.path.join(path, '__init__.py'))
103
104 @staticmethod
105 def _build_filter(*patterns):
106 """
107 Given a list of patterns, return a callable that will be true only if
108 the input matches at least one of the patterns.
109 """
110 return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
111
112
113 class PEP420PackageFinder(PackageFinder):
114 @staticmethod
115 def _looks_like_package(path):
116 return True
117
118
119 find_packages = PackageFinder.find
120 find_namespace_packages = PEP420PackageFinder.find
121
122
123 def _install_setup_requires(attrs):
124 # Note: do not use `setuptools.Distribution` directly, as
125 # our PEP 517 backend patch `distutils.core.Distribution`.
126 class MinimalDistribution(distutils.core.Distribution):
127 """
128 A minimal version of a distribution for supporting the
129 fetch_build_eggs interface.
130 """
131
132 def __init__(self, attrs):
133 _incl = 'dependency_links', 'setup_requires'
134 filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
135 distutils.core.Distribution.__init__(self, filtered)
136
137 def finalize_options(self):
138 """
139 Disable finalize_options to avoid building the working set.
140 Ref #2158.
141 """
142
143 dist = MinimalDistribution(attrs)
144
145 # Honor setup.cfg's options.
146 dist.parse_config_files(ignore_option_errors=True)
147 if dist.setup_requires:
148 warnings.warn(
149 "setup_requires is deprecated. Supply build "
150 "dependencies using PEP 517 pyproject.toml build-requires.",
151 SetuptoolsDeprecationWarning,
152 )
153 dist.fetch_build_eggs(dist.setup_requires)
154
155
156 def setup(**attrs):
157 # Make sure we have any requirements needed to interpret 'attrs'.
158 _install_setup_requires(attrs)
159 return distutils.core.setup(**attrs)
160
161
162 setup.__doc__ = distutils.core.setup.__doc__
163
164
165 _Command = monkey.get_unpatched(distutils.core.Command)
166
167
168 class Command(_Command):
169 __doc__ = _Command.__doc__
170
171 command_consumes_arguments = False
172
173 def __init__(self, dist, **kw):
174 """
175 Construct the command for dist, updating
176 vars(self) with any keyword parameters.
177 """
178 _Command.__init__(self, dist)
179 vars(self).update(kw)
180
181 def _ensure_stringlike(self, option, what, default=None):
182 val = getattr(self, option)
183 if val is None:
184 setattr(self, option, default)
185 return default
186 elif not isinstance(val, str):
187 raise DistutilsOptionError(
188 "'%s' must be a %s (got `%s`)" % (option, what, val)
189 )
190 return val
191
192 def ensure_string_list(self, option):
193 r"""Ensure that 'option' is a list of strings. If 'option' is
194 currently a string, we split it either on /,\s*/ or /\s+/, so
195 "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
196 ["foo", "bar", "baz"].
197 """
198 val = getattr(self, option)
199 if val is None:
200 return
201 elif isinstance(val, str):
202 setattr(self, option, re.split(r',\s*|\s+', val))
203 else:
204 if isinstance(val, list):
205 ok = all(isinstance(v, str) for v in val)
206 else:
207 ok = False
208 if not ok:
209 raise DistutilsOptionError(
210 "'%s' must be a list of strings (got %r)" % (option, val)
211 )
212
213 def reinitialize_command(self, command, reinit_subcommands=0, **kw):
214 cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
215 vars(cmd).update(kw)
216 return cmd
217
218
219 def _find_all_simple(path):
220 """
221 Find all files under 'path'
222 """
223 results = (
224 os.path.join(base, file)
225 for base, dirs, files in os.walk(path, followlinks=True)
226 for file in files
227 )
228 return filter(os.path.isfile, results)
229
230
231 def findall(dir=os.curdir):
232 """
233 Find all files under 'dir' and return the list of full filenames.
234 Unless dir is '.', return full filenames with dir prepended.
235 """
236 files = _find_all_simple(dir)
237 if dir == os.curdir:
238 make_rel = functools.partial(os.path.relpath, start=dir)
239 files = map(make_rel, files)
240 return list(files)
241
242
243 class sic(str):
244 """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
245
246
247 # Apply monkey patches
248 monkey.patch_all()
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/__init__.py b/setuptools/__init__.py
--- a/setuptools/__init__.py
+++ b/setuptools/__init__.py
@@ -4,7 +4,6 @@
import functools
import os
import re
-import warnings
import _distutils_hack.override # noqa: F401
@@ -145,11 +144,6 @@
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
- warnings.warn(
- "setup_requires is deprecated. Supply build "
- "dependencies using PEP 517 pyproject.toml build-requires.",
- SetuptoolsDeprecationWarning,
- )
dist.fetch_build_eggs(dist.setup_requires)
diff --git a/setuptools/installer.py b/setuptools/installer.py
--- a/setuptools/installer.py
+++ b/setuptools/installer.py
@@ -3,11 +3,13 @@
import subprocess
import sys
import tempfile
+import warnings
from distutils import log
from distutils.errors import DistutilsError
import pkg_resources
from setuptools.wheel import Wheel
+from ._deprecation_warning import SetuptoolsDeprecationWarning
def _fixup_find_links(find_links):
@@ -22,6 +24,11 @@
"""Fetch an egg needed for building.
Use pip/wheel to fetch/build a wheel."""
+ warnings.warn(
+ "setuptools.installer is deprecated. Requirements should "
+ "be satisfied by a PEP 517 installer.",
+ SetuptoolsDeprecationWarning,
+ )
# Warn if wheel is not available
try:
pkg_resources.get_distribution('wheel')
| {"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -4,7 +4,6 @@\n import functools\n import os\n import re\n-import warnings\n \n import _distutils_hack.override # noqa: F401\n \n@@ -145,11 +144,6 @@\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n- warnings.warn(\n- \"setup_requires is deprecated. Supply build \"\n- \"dependencies using PEP 517 pyproject.toml build-requires.\",\n- SetuptoolsDeprecationWarning,\n- )\n dist.fetch_build_eggs(dist.setup_requires)\n \n \ndiff --git a/setuptools/installer.py b/setuptools/installer.py\n--- a/setuptools/installer.py\n+++ b/setuptools/installer.py\n@@ -3,11 +3,13 @@\n import subprocess\n import sys\n import tempfile\n+import warnings\n from distutils import log\n from distutils.errors import DistutilsError\n \n import pkg_resources\n from setuptools.wheel import Wheel\n+from ._deprecation_warning import SetuptoolsDeprecationWarning\n \n \n def _fixup_find_links(find_links):\n@@ -22,6 +24,11 @@\n \"\"\"Fetch an egg needed for building.\n \n Use pip/wheel to fetch/build a wheel.\"\"\"\n+ warnings.warn(\n+ \"setuptools.installer is deprecated. Requirements should \"\n+ \"be satisfied by a PEP 517 installer.\",\n+ SetuptoolsDeprecationWarning,\n+ )\n # Warn if wheel is not available\n try:\n pkg_resources.get_distribution('wheel')\n", "issue": "Deprecate setuptools.installer but not setup_requires\n> You're going to break entire workflows and distros with this, please reconsider.\r\n\r\nI'm tempted to un-deprecate `setup_requires` as a parameter, but to deprecate/remove only the `fetch_build_eggs` part. In other words, continue to allow `setup_requires` to supply requirements for PEP 517 hooks (`get_requires_for_build_sdist` and `get_requires_for_build_wheel`).\r\n\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/setuptools/issues/2824#issuecomment-967430754_\n", "before_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.wheel import Wheel\n\n\ndef _fixup_find_links(find_links):\n \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n if isinstance(find_links, str):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links\n\n\ndef fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n # Warn if wheel is not available\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n find_links = (\n 
_fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts\n else []\n )\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with tempfile.TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n for link in find_links or []:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n cmd.append(req.url or str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e)) from e\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": "setuptools/installer.py"}, {"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nfrom fnmatch import fnmatchcase\nimport functools\nimport os\nimport re\nimport warnings\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom . import monkey\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 
'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(\n cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include),\n )\n )\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if '.' in dir or not cls._looks_like_package(full_path):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n distutils.core.Distribution.__init__(self, filtered)\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n warnings.warn(\n \"setup_requires is deprecated. 
Supply build \"\n \"dependencies using PEP 517 pyproject.toml build-requires.\",\n SetuptoolsDeprecationWarning,\n )\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}], "after_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport warnings\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.wheel import Wheel\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\n\ndef _fixup_find_links(find_links):\n \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n if isinstance(find_links, str):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links\n\n\ndef fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n warnings.warn(\n \"setuptools.installer is deprecated. 
Requirements should \"\n \"be satisfied by a PEP 517 installer.\",\n SetuptoolsDeprecationWarning,\n )\n # Warn if wheel is not available\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n find_links = (\n _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts\n else []\n )\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with tempfile.TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n for link in find_links or []:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n cmd.append(req.url or str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e)) from e\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": "setuptools/installer.py"}, {"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nfrom fnmatch import fnmatchcase\nimport functools\nimport os\nimport re\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom . 
import monkey\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(\n cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include),\n )\n )\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if '.' 
in dir or not cls._looks_like_package(full_path):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n distutils.core.Distribution.__init__(self, filtered)\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]} | 3,815 | 377 |
gh_patches_debug_23934 | rasdani/github-patches | git_diff | microsoft__playwright-python-159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stdout/stderr pipe throws a stack trace if stdout/stderr is mocked by Pytest
I think this is more an upstream issue (here) than one for the Pytest plugin.
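A minimal reproduction sketch (assuming pytest's default output capture, i.e. without `-s`; the context body never runs because the driver launch itself fails):

```python
from playwright import sync_playwright


def test_driver_launch():
    # sync_playwright() spawns the driver subprocess with stderr=sys.stderr;
    # under pytest's capture sys.stderr has no fileno(), so subprocess.Popen
    # raises io.UnsupportedOperation before the context body ever runs.
    with sync_playwright() as p:
        assert p is not None
```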
Stacktrace:
```
pytest project-name/browser_tests --browser chromium --base-url http://localhost:8000/
============================= test session starts ==============================
platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1
baseurl: http://localhost:8000/
django: settings: config.settings.test (from option)
Using --randomly-seed=1
rootdir: /home/runner/work/project-name/project-name, configfile: setup.cfg
plugins: freezegun-0.4.2, base-url-1.4.2, playwright-0.0.5, cov-2.10.0, sugar-0.9.4, django-3.9.0, randomly-3.4.1, celery-4.4.7, factoryboy-2.0.3, Faker-4.1.1, env-0.6.2
collected 15 items
project-name/browser_tests/test_internal_manager_dashboard.py E [ 6%]
==================================== ERRORS ====================================
_ ERROR at setup of test123[chromium] _
launch_browser = <function launch_browser.<locals>.launch at 0x7f6bc73de430>
@pytest.fixture(scope="session")
def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:
> browser = launch_browser()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:97: in launch
pw_context = sync_playwright()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/__init__.py:27: in sync_playwright
return SyncPlaywrightContextManager()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:72: in __init__
self._connection = run_driver()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:67: in run_driver
return loop.run_until_complete(run_driver_async())
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:616: in run_until_complete
return future.result()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:48: in run_driver_async
proc = await asyncio.create_subprocess_exec(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/subprocess.py:236: in create_subprocess_exec
transport, protocol = await loop.subprocess_exec(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:1630: in subprocess_exec
transport = await self._make_subprocess_transport(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:197: in _make_subprocess_transport
transp = _UnixSubprocessTransport(self, protocol, args, shell,
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_subprocess.py:36: in __init__
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:789: in _start
self._proc = subprocess.Popen(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:804: in __init__
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <subprocess.Popen object at 0x7f6bc73dfa60>
stdin = <socket.socket [closed] fd=-1, family=AddressFamily.AF_UNIX, type=SocketKind.SOCK_STREAM, proto=0>
stdout = -1, stderr = <_io.TextIOWrapper encoding='UTF-8'>
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
if c2pwrite != -1:
errwrite = c2pwrite
else: # child's stdout is not set, use parent's stdout
errwrite = sys.__stdout__.fileno()
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
> errwrite = stderr.fileno()
E io.UnsupportedOperation: fileno
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:1504: UnsupportedOperation
---------------------------- Captured stdout setup -----------------------------
kektus <_io.TextIOWrapper encoding='UTF-8'>
=========================== short test summary info ============================
ERROR project-name/browser_tests/test123.py::test123[chromium]
!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.67s ===============================
Makefile:23: recipe for target 'browser_test' failed
```
Using the `-s` parameter is a workaround for that. The issue has been present since https://github.com/microsoft/playwright-python/pull/145.
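One way to make the driver launch tolerate a captured `sys.stderr` is the same fallback pytest itself uses for fault handling; a sketch (the helper name here is illustrative):

```python
import io
import sys


def _get_stderr_fileno() -> int:
    """Return a real stderr file descriptor even when sys.stderr has been
    replaced by a capture object that does not implement fileno()."""
    try:
        return sys.stderr.fileno()
    except io.UnsupportedOperation:
        # pytest (and pytest-xdist) swap sys.stderr for a non-file object;
        # fall back to the original process-level stderr.
        return sys.__stderr__.fileno()
```

The driver launch in `playwright/main.py` would then pass `stderr=_get_stderr_fileno()` to `asyncio.create_subprocess_exec` instead of `stderr=sys.stderr`.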
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/main.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import subprocess
17 import sys
18 from typing import Any
19
20 from greenlet import greenlet
21
22 from playwright.async_api import Playwright as AsyncPlaywright
23 from playwright.connection import Connection
24 from playwright.helper import Error
25 from playwright.object_factory import create_remote_object
26 from playwright.path_utils import get_file_dirname
27 from playwright.playwright import Playwright
28 from playwright.sync_api import Playwright as SyncPlaywright
29 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber
30
31
32 def compute_driver_name() -> str:
33 platform = sys.platform
34 if platform == "darwin":
35 result = "driver-macos"
36 elif platform == "linux":
37 result = "driver-linux"
38 elif platform == "win32":
39 result = "driver-win.exe"
40 return result
41
42
43 async def run_driver_async() -> Connection:
44 package_path = get_file_dirname()
45 driver_name = compute_driver_name()
46 driver_executable = package_path / "drivers" / driver_name
47
48 proc = await asyncio.create_subprocess_exec(
49 str(driver_executable),
50 stdin=asyncio.subprocess.PIPE,
51 stdout=asyncio.subprocess.PIPE,
52 stderr=sys.stderr,
53 limit=32768,
54 )
55 assert proc.stdout
56 assert proc.stdin
57 connection = Connection(
58 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()
59 )
60 return connection
61
62
63 def run_driver() -> Connection:
64 loop = asyncio.get_event_loop()
65 if loop.is_running():
66 raise Error("Can only run one Playwright at a time.")
67 return loop.run_until_complete(run_driver_async())
68
69
70 class SyncPlaywrightContextManager:
71 def __init__(self) -> None:
72 self._connection = run_driver()
73 self._playwright: SyncPlaywright
74
75 def __enter__(self) -> SyncPlaywright:
76 g_self = greenlet.getcurrent()
77
78 def callback_wrapper(playwright_impl: Playwright) -> None:
79 self._playwright = SyncPlaywright(playwright_impl)
80 g_self.switch()
81
82 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper)
83 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))
84 dispatcher_fiber().switch()
85 return self._playwright
86
87 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
88 self._connection.stop_sync()
89
90
91 class AsyncPlaywrightContextManager:
92 def __init__(self) -> None:
93 self._connection: Connection
94
95 async def __aenter__(self) -> AsyncPlaywright:
96 self._connection = await run_driver_async()
97 self._connection.run_async()
98 return AsyncPlaywright(
99 await self._connection.wait_for_object_with_known_name("Playwright")
100 )
101
102 async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
103 self._connection.stop_async()
104
105
106 if sys.platform == "win32":
107 # Use ProactorEventLoop in 3.7, which is default in 3.8
108 loop = asyncio.ProactorEventLoop()
109 asyncio.set_event_loop(loop)
110
111
112 def main() -> None:
113 if "install" not in sys.argv:
114 print('Run "python -m playwright install" to complete installation')
115 return
116 package_path = get_file_dirname()
117 driver_name = compute_driver_name()
118 driver_executable = package_path / "drivers" / driver_name
119 print("Installing the browsers...")
120 subprocess.check_call(f"{driver_executable} install", shell=True)
121
122 print("Playwright is now ready for use")
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playwright/main.py b/playwright/main.py
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -13,6 +13,7 @@
# limitations under the License.
import asyncio
+import io
import subprocess
import sys
from typing import Any
@@ -45,11 +46,21 @@
driver_name = compute_driver_name()
driver_executable = package_path / "drivers" / driver_name
+ # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80
+ def _get_stderr_fileno() -> int:
+ try:
+ return sys.stderr.fileno()
+ except io.UnsupportedOperation:
+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
+ # This is potentially dangerous, but the best we can do.
+ return sys.__stderr__.fileno()
+
proc = await asyncio.create_subprocess_exec(
str(driver_executable),
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=sys.stderr,
+ stderr=_get_stderr_fileno(),
limit=32768,
)
assert proc.stdout
| {"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import asyncio\n+import io\n import subprocess\n import sys\n from typing import Any\n@@ -45,11 +46,21 @@\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n \n+ # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n+ def _get_stderr_fileno() -> int:\n+ try:\n+ return sys.stderr.fileno()\n+ except io.UnsupportedOperation:\n+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n+ # This is potentially dangerous, but the best we can do.\n+ return sys.__stderr__.fileno()\n+\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=sys.stderr,\n+ stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n", "issue": "Stdout/stderr pipe throws stacktrace if stdout/stderr is mocked by Pytest\nI think more an upstream issue (here) instead of for the Pytest plugin.\r\n\r\nStacktrace:\r\n\r\n```\r\npytest project-name/browser_tests --browser chromium --base-url http://localhost:8000/\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1\r\nbaseurl: http://localhost:8000/\r\ndjango: settings: config.settings.test (from option)\r\nUsing --randomly-seed=1\r\nrootdir: /home/runner/work/project-name/project-name, configfile: setup.cfg\r\nplugins: freezegun-0.4.2, base-url-1.4.2, playwright-0.0.5, cov-2.10.0, sugar-0.9.4, django-3.9.0, randomly-3.4.1, celery-4.4.7, factoryboy-2.0.3, Faker-4.1.1, env-0.6.2\r\ncollected 15 items\r\n\r\nproject-name/browser_tests/test_internal_manager_dashboard.py E [ 6%]\r\n\r\n\r\n==================================== ERRORS ====================================\r\n_ ERROR at setup of test123[chromium] _\r\n\r\nlaunch_browser = <function launch_browser.<locals>.launch at 0x7f6bc73de430>\r\n\r\n @pytest.fixture(scope=\"session\")\r\n def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:\r\n> browser = launch_browser()\r\n\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:114: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:97: in launch\r\n pw_context = sync_playwright()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/__init__.py:27: in sync_playwright\r\n return SyncPlaywrightContextManager()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:72: in __init__\r\n self._connection = run_driver()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:67: in run_driver\r\n return loop.run_until_complete(run_driver_async())\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:616: in run_until_complete\r\n return future.result()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:48: in run_driver_async\r\n proc = await 
asyncio.create_subprocess_exec(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/subprocess.py:236: in create_subprocess_exec\r\n transport, protocol = await loop.subprocess_exec(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:1630: in subprocess_exec\r\n transport = await self._make_subprocess_transport(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:197: in _make_subprocess_transport\r\n transp = _UnixSubprocessTransport(self, protocol, args, shell,\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_subprocess.py:36: in __init__\r\n self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:789: in _start\r\n self._proc = subprocess.Popen(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:804: in __init__\r\n errread, errwrite) = self._get_handles(stdin, stdout, stderr)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nself = <subprocess.Popen object at 0x7f6bc73dfa60>\r\nstdin = <socket.socket [closed] fd=-1, family=AddressFamily.AF_UNIX, type=SocketKind.SOCK_STREAM, proto=0>\r\nstdout = -1, stderr = <_io.TextIOWrapper encoding='UTF-8'>\r\n\r\n def _get_handles(self, stdin, stdout, stderr):\r\n \"\"\"Construct and return tuple with IO objects:\r\n p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite\r\n \"\"\"\r\n p2cread, p2cwrite = -1, -1\r\n c2pread, c2pwrite = -1, -1\r\n errread, errwrite = -1, -1\r\n \r\n if stdin is None:\r\n pass\r\n elif stdin == PIPE:\r\n p2cread, p2cwrite = os.pipe()\r\n elif stdin == DEVNULL:\r\n p2cread = self._get_devnull()\r\n elif isinstance(stdin, int):\r\n p2cread = stdin\r\n else:\r\n # Assuming file-like object\r\n p2cread = stdin.fileno()\r\n \r\n if stdout is None:\r\n pass\r\n elif stdout == PIPE:\r\n c2pread, c2pwrite = os.pipe()\r\n elif stdout == DEVNULL:\r\n c2pwrite = self._get_devnull()\r\n elif isinstance(stdout, int):\r\n c2pwrite = stdout\r\n else:\r\n # Assuming file-like object\r\n c2pwrite = stdout.fileno()\r\n \r\n if stderr is None:\r\n pass\r\n elif stderr == PIPE:\r\n errread, errwrite = os.pipe()\r\n elif stderr == STDOUT:\r\n if c2pwrite != -1:\r\n errwrite = c2pwrite\r\n else: # child's stdout is not set, use parent's stdout\r\n errwrite = sys.__stdout__.fileno()\r\n elif stderr == DEVNULL:\r\n errwrite = self._get_devnull()\r\n elif isinstance(stderr, int):\r\n errwrite = stderr\r\n else:\r\n # Assuming file-like object\r\n> errwrite = stderr.fileno()\r\nE io.UnsupportedOperation: fileno\r\n\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:1504: UnsupportedOperation\r\n---------------------------- Captured stdout setup -----------------------------\r\nkektus <_io.TextIOWrapper encoding='UTF-8'>\r\n=========================== short test summary info ============================\r\nERROR project-name/browser_tests/test123.py::test123[chromium]\r\n!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n=============================== 1 error in 0.67s ===============================\r\nMakefile:23: recipe for target 'browser_test' failed\r\n\r\n```\r\n\r\nUsing the `-s` parameter is a workaround for that. 
Issue is persistent since https://github.com/microsoft/playwright-python/pull/145.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> 
None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n def _get_stderr_fileno() -> int:\n try:\n return sys.stderr.fileno()\n except io.UnsupportedOperation:\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n return sys.__stderr__.fileno()\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n 
self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}]} | 3,120 | 336 |
gh_patches_debug_20439 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-4611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`[p]userinfo` doesn't show the new `competing` activity type
# Command bugs
#### Command name
`[p]userinfo`
#### What cog is this command from?
Mod
#### What were you expecting to happen?
I expected `[p]userinfo` to show the competing activity when using it on a user with a competing activity set.
#### What actually happened?
Command didn't show any activity.
#### How can we reproduce this issue?
1. Set "Competing in" activity on bot user with `[p]set competing` command.
2. Use `[p]userinfo` command on bot user.
3. See the issue.
--- END ISSUE ---
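For context on step 1 of the reproduction: `[p]set competing <text>` boils down to a presence change using `discord.ActivityType.competing`. A minimal sketch of that call (the bot object, prefix, and command name here are placeholders for illustration, not taken from the issue):

```python
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!")  # placeholder bot, not Red itself


@bot.command()
async def compete(ctx: commands.Context, *, text: str):
    # Roughly what `[p]set competing <text>` asks the gateway to do.
    activity = discord.Activity(type=discord.ActivityType.competing, name=text)
    await bot.change_presence(activity=activity)
    await ctx.send(f"Now competing in: {text}")

# bot.run("TOKEN")  # token intentionally left as a placeholder
```

Once that presence is set, the competing entry shows up in the member's `activities`, which is exactly the collection the `userinfo` status handlers below iterate over.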
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/mod/names.py`
Content:
```
1 from datetime import datetime
2 from typing import cast
3
4 import discord
5 from redbot.core import commands, i18n, checks
6 from redbot.core.utils.common_filters import (
7 filter_invites,
8 filter_various_mentions,
9 escape_spoilers_and_mass_mentions,
10 )
11 from redbot.core.utils.mod import get_audit_reason
12 from .abc import MixinMeta
13
14 _ = i18n.Translator("Mod", __file__)
15
16
17 class ModInfo(MixinMeta):
18 """
19 Commands regarding names, userinfo, etc.
20 """
21
22 async def get_names_and_nicks(self, user):
23 names = await self.config.user(user).past_names()
24 nicks = await self.config.member(user).past_nicks()
25 if names:
26 names = [escape_spoilers_and_mass_mentions(name) for name in names if name]
27 if nicks:
28 nicks = [escape_spoilers_and_mass_mentions(nick) for nick in nicks if nick]
29 return names, nicks
30
31 @commands.command()
32 @commands.guild_only()
33 @commands.bot_has_permissions(manage_nicknames=True)
34 @checks.admin_or_permissions(manage_nicknames=True)
35 async def rename(self, ctx: commands.Context, user: discord.Member, *, nickname: str = ""):
36 """Change a user's nickname.
37
38 Leaving the nickname empty will remove it.
39 """
40 nickname = nickname.strip()
41 me = cast(discord.Member, ctx.me)
42 if not nickname:
43 nickname = None
44 elif not 2 <= len(nickname) <= 32:
45 await ctx.send(_("Nicknames must be between 2 and 32 characters long."))
46 return
47 if not (
48 (me.guild_permissions.manage_nicknames or me.guild_permissions.administrator)
49 and me.top_role > user.top_role
50 and user != ctx.guild.owner
51 ):
52 await ctx.send(
53 _(
54 "I do not have permission to rename that member. They may be higher than or "
55 "equal to me in the role hierarchy."
56 )
57 )
58 else:
59 try:
60 await user.edit(reason=get_audit_reason(ctx.author, None), nick=nickname)
61 except discord.Forbidden:
62 # Just in case we missed something in the permissions check above
63 await ctx.send(_("I do not have permission to rename that member."))
64 except discord.HTTPException as exc:
65 if exc.status == 400: # BAD REQUEST
66 await ctx.send(_("That nickname is invalid."))
67 else:
68 await ctx.send(_("An unexpected error has occured."))
69 else:
70 await ctx.send(_("Done."))
71
72 def handle_custom(self, user):
73 a = [c for c in user.activities if c.type == discord.ActivityType.custom]
74 if not a:
75 return None, discord.ActivityType.custom
76 a = a[0]
77 c_status = None
78 if not a.name and not a.emoji:
79 return None, discord.ActivityType.custom
80 elif a.name and a.emoji:
81 c_status = _("Custom: {emoji} {name}").format(emoji=a.emoji, name=a.name)
82 elif a.emoji:
83 c_status = _("Custom: {emoji}").format(emoji=a.emoji)
84 elif a.name:
85 c_status = _("Custom: {name}").format(name=a.name)
86 return c_status, discord.ActivityType.custom
87
88 def handle_playing(self, user):
89 p_acts = [c for c in user.activities if c.type == discord.ActivityType.playing]
90 if not p_acts:
91 return None, discord.ActivityType.playing
92 p_act = p_acts[0]
93 act = _("Playing: {name}").format(name=p_act.name)
94 return act, discord.ActivityType.playing
95
96 def handle_streaming(self, user):
97 s_acts = [c for c in user.activities if c.type == discord.ActivityType.streaming]
98 if not s_acts:
99 return None, discord.ActivityType.streaming
100 s_act = s_acts[0]
101 if isinstance(s_act, discord.Streaming):
102 act = _("Streaming: [{name}{sep}{game}]({url})").format(
103 name=discord.utils.escape_markdown(s_act.name),
104 sep=" | " if s_act.game else "",
105 game=discord.utils.escape_markdown(s_act.game) if s_act.game else "",
106 url=s_act.url,
107 )
108 else:
109 act = _("Streaming: {name}").format(name=s_act.name)
110 return act, discord.ActivityType.streaming
111
112 def handle_listening(self, user):
113 l_acts = [c for c in user.activities if c.type == discord.ActivityType.listening]
114 if not l_acts:
115 return None, discord.ActivityType.listening
116 l_act = l_acts[0]
117 if isinstance(l_act, discord.Spotify):
118 act = _("Listening: [{title}{sep}{artist}]({url})").format(
119 title=discord.utils.escape_markdown(l_act.title),
120 sep=" | " if l_act.artist else "",
121 artist=discord.utils.escape_markdown(l_act.artist) if l_act.artist else "",
122 url=f"https://open.spotify.com/track/{l_act.track_id}",
123 )
124 else:
125 act = _("Listening: {title}").format(title=l_act.name)
126 return act, discord.ActivityType.listening
127
128 def handle_watching(self, user):
129 w_acts = [c for c in user.activities if c.type == discord.ActivityType.watching]
130 if not w_acts:
131 return None, discord.ActivityType.watching
132 w_act = w_acts[0]
133 act = _("Watching: {name}").format(name=w_act.name)
134 return act, discord.ActivityType.watching
135
136 def get_status_string(self, user):
137 string = ""
138 for a in [
139 self.handle_custom(user),
140 self.handle_playing(user),
141 self.handle_listening(user),
142 self.handle_streaming(user),
143 self.handle_watching(user),
144 ]:
145 status_string, status_type = a
146 if status_string is None:
147 continue
148 string += f"{status_string}\n"
149 return string
150
151 @commands.command()
152 @commands.guild_only()
153 @commands.bot_has_permissions(embed_links=True)
154 async def userinfo(self, ctx, *, user: discord.Member = None):
155 """Show information about a user.
156
157 This includes fields for status, discord join date, server
158 join date, voice state and previous names/nicknames.
159
160 If the user has no roles, previous names or previous nicknames,
161 these fields will be omitted.
162 """
163 author = ctx.author
164 guild = ctx.guild
165
166 if not user:
167 user = author
168
169 # A special case for a special someone :^)
170 special_date = datetime(2016, 1, 10, 6, 8, 4, 443000)
171 is_special = user.id == 96130341705637888 and guild.id == 133049272517001216
172
173 roles = user.roles[-1:0:-1]
174 names, nicks = await self.get_names_and_nicks(user)
175
176 joined_at = user.joined_at if not is_special else special_date
177 since_created = (ctx.message.created_at - user.created_at).days
178 if joined_at is not None:
179 since_joined = (ctx.message.created_at - joined_at).days
180 user_joined = joined_at.strftime("%d %b %Y %H:%M")
181 else:
182 since_joined = "?"
183 user_joined = _("Unknown")
184 user_created = user.created_at.strftime("%d %b %Y %H:%M")
185 voice_state = user.voice
186 member_number = (
187 sorted(guild.members, key=lambda m: m.joined_at or ctx.message.created_at).index(user)
188 + 1
189 )
190
191 created_on = _("{}\n({} days ago)").format(user_created, since_created)
192 joined_on = _("{}\n({} days ago)").format(user_joined, since_joined)
193
194 if any(a.type is discord.ActivityType.streaming for a in user.activities):
195 statusemoji = "\N{LARGE PURPLE CIRCLE}"
196 elif user.status.name == "online":
197 statusemoji = "\N{LARGE GREEN CIRCLE}"
198 elif user.status.name == "offline":
199 statusemoji = "\N{MEDIUM WHITE CIRCLE}\N{VARIATION SELECTOR-16}"
200 elif user.status.name == "dnd":
201 statusemoji = "\N{LARGE RED CIRCLE}"
202 elif user.status.name == "idle":
203 statusemoji = "\N{LARGE ORANGE CIRCLE}"
204 activity = _("Chilling in {} status").format(user.status)
205 status_string = self.get_status_string(user)
206
207 if roles:
208
209 role_str = ", ".join([x.mention for x in roles])
210 # 400 BAD REQUEST (error code: 50035): Invalid Form Body
211 # In embed.fields.2.value: Must be 1024 or fewer in length.
212 if len(role_str) > 1024:
213 # Alternative string building time.
214 # This is not the most optimal, but if you're hitting this, you are losing more time
215 # to every single check running on users than the occasional user info invoke
216 # We don't start by building this way, since the number of times we hit this should be
217 # infinitesimally small compared to when we don't across all uses of Red.
218 continuation_string = _(
219 "and {numeric_number} more roles not displayed due to embed limits."
220 )
221 available_length = 1024 - len(continuation_string) # do not attempt to tweak, i18n
222
223 role_chunks = []
224 remaining_roles = 0
225
226 for r in roles:
227 chunk = f"{r.mention}, "
228 chunk_size = len(chunk)
229
230 if chunk_size < available_length:
231 available_length -= chunk_size
232 role_chunks.append(chunk)
233 else:
234 remaining_roles += 1
235
236 role_chunks.append(continuation_string.format(numeric_number=remaining_roles))
237
238 role_str = "".join(role_chunks)
239
240 else:
241 role_str = None
242
243 data = discord.Embed(description=status_string or activity, colour=user.colour)
244
245 data.add_field(name=_("Joined Discord on"), value=created_on)
246 data.add_field(name=_("Joined this server on"), value=joined_on)
247 if role_str is not None:
248 data.add_field(
249 name=_("Roles") if len(roles) > 1 else _("Role"), value=role_str, inline=False
250 )
251 if names:
252 # May need sanitizing later, but mentions do not ping in embeds currently
253 val = filter_invites(", ".join(names))
254 data.add_field(
255 name=_("Previous Names") if len(names) > 1 else _("Previous Name"),
256 value=val,
257 inline=False,
258 )
259 if nicks:
260 # May need sanitizing later, but mentions do not ping in embeds currently
261 val = filter_invites(", ".join(nicks))
262 data.add_field(
263 name=_("Previous Nicknames") if len(nicks) > 1 else _("Previous Nickname"),
264 value=val,
265 inline=False,
266 )
267 if voice_state and voice_state.channel:
268 data.add_field(
269 name=_("Current voice channel"),
270 value="{0.mention} ID: {0.id}".format(voice_state.channel),
271 inline=False,
272 )
273 data.set_footer(text=_("Member #{} | User ID: {}").format(member_number, user.id))
274
275 name = str(user)
276 name = " ~ ".join((name, user.nick)) if user.nick else name
277 name = filter_invites(name)
278
279 avatar = user.avatar_url_as(static_format="png")
280 data.set_author(name=f"{statusemoji} {name}", url=avatar)
281 data.set_thumbnail(url=avatar)
282
283 await ctx.send(embed=data)
284
285 @commands.command()
286 async def names(self, ctx: commands.Context, *, user: discord.Member):
287 """Show previous names and nicknames of a user."""
288 names, nicks = await self.get_names_and_nicks(user)
289 msg = ""
290 if names:
291 msg += _("**Past 20 names**:")
292 msg += "\n"
293 msg += ", ".join(names)
294 if nicks:
295 if msg:
296 msg += "\n\n"
297 msg += _("**Past 20 nicknames**:")
298 msg += "\n"
299 msg += ", ".join(nicks)
300 if msg:
301 msg = filter_various_mentions(msg)
302 await ctx.send(msg)
303 else:
304 await ctx.send(_("That user doesn't have any recorded name or nickname change."))
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/cogs/mod/names.py b/redbot/cogs/mod/names.py
--- a/redbot/cogs/mod/names.py
+++ b/redbot/cogs/mod/names.py
@@ -133,6 +133,14 @@
act = _("Watching: {name}").format(name=w_act.name)
return act, discord.ActivityType.watching
+ def handle_competing(self, user):
+ w_acts = [c for c in user.activities if c.type == discord.ActivityType.competing]
+ if not w_acts:
+ return None, discord.ActivityType.competing
+ w_act = w_acts[0]
+ act = _("Competing in: {competing}").format(competing=w_act.name)
+ return act, discord.ActivityType.competing
+
def get_status_string(self, user):
string = ""
for a in [
@@ -141,6 +149,7 @@
self.handle_listening(user),
self.handle_streaming(user),
self.handle_watching(user),
+ self.handle_competing(user),
]:
status_string, status_type = a
if status_string is None:
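The first hunk above mirrors the existing `handle_watching` pattern. A rough way to poke the new handler in isolation (this assumes a dev install with the patch applied; the fake member object is an illustration, not part of the patch):

```python
import types

import discord
from redbot.cogs.mod.names import ModInfo  # mixin shown above; handle_competing exists once patched

fake_member = types.SimpleNamespace(
    activities=(discord.Activity(type=discord.ActivityType.competing, name="Chess Finals"),)
)

# handle_competing never touches `self`, so the unbound function can be called directly.
text, activity_type = ModInfo.handle_competing(None, fake_member)
print(text)           # Competing in: Chess Finals
print(activity_type)  # ActivityType.competing
```

With the second hunk adding the handler to `get_status_string`, that same line ends up in the `[p]userinfo` embed description.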
| {"golden_diff": "diff --git a/redbot/cogs/mod/names.py b/redbot/cogs/mod/names.py\n--- a/redbot/cogs/mod/names.py\n+++ b/redbot/cogs/mod/names.py\n@@ -133,6 +133,14 @@\n act = _(\"Watching: {name}\").format(name=w_act.name)\n return act, discord.ActivityType.watching\n \n+ def handle_competing(self, user):\n+ w_acts = [c for c in user.activities if c.type == discord.ActivityType.competing]\n+ if not w_acts:\n+ return None, discord.ActivityType.competing\n+ w_act = w_acts[0]\n+ act = _(\"Competing in: {competing}\").format(competing=w_act.name)\n+ return act, discord.ActivityType.competing\n+\n def get_status_string(self, user):\n string = \"\"\n for a in [\n@@ -141,6 +149,7 @@\n self.handle_listening(user),\n self.handle_streaming(user),\n self.handle_watching(user),\n+ self.handle_competing(user),\n ]:\n status_string, status_type = a\n if status_string is None:\n", "issue": "`[p]userinfo` doesn't show the new `competing` activity type\n# Command bugs\r\n\r\n#### Command name\r\n\r\n`[p]userinfo`\r\n\r\n#### What cog is this command from?\r\n\r\nMod\r\n\r\n#### What were you expecting to happen?\r\n\r\nI expected `[p]userinfo` to show competing activity when using it on user with competing activity set.\r\n\r\n#### What actually happened?\r\n\r\nCommand didn't show any activity.\r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Set \"Competing in\" activity on bot user with `[p]set competing` command.\r\n2. Use `[p]userinfo` command on bot user.\r\n3. See the issue.\r\n\n", "before_files": [{"content": "from datetime import datetime\nfrom typing import cast\n\nimport discord\nfrom redbot.core import commands, i18n, checks\nfrom redbot.core.utils.common_filters import (\n filter_invites,\n filter_various_mentions,\n escape_spoilers_and_mass_mentions,\n)\nfrom redbot.core.utils.mod import get_audit_reason\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\n\n\nclass ModInfo(MixinMeta):\n \"\"\"\n Commands regarding names, userinfo, etc.\n \"\"\"\n\n async def get_names_and_nicks(self, user):\n names = await self.config.user(user).past_names()\n nicks = await self.config.member(user).past_nicks()\n if names:\n names = [escape_spoilers_and_mass_mentions(name) for name in names if name]\n if nicks:\n nicks = [escape_spoilers_and_mass_mentions(nick) for nick in nicks if nick]\n return names, nicks\n\n @commands.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_nicknames=True)\n @checks.admin_or_permissions(manage_nicknames=True)\n async def rename(self, ctx: commands.Context, user: discord.Member, *, nickname: str = \"\"):\n \"\"\"Change a user's nickname.\n\n Leaving the nickname empty will remove it.\n \"\"\"\n nickname = nickname.strip()\n me = cast(discord.Member, ctx.me)\n if not nickname:\n nickname = None\n elif not 2 <= len(nickname) <= 32:\n await ctx.send(_(\"Nicknames must be between 2 and 32 characters long.\"))\n return\n if not (\n (me.guild_permissions.manage_nicknames or me.guild_permissions.administrator)\n and me.top_role > user.top_role\n and user != ctx.guild.owner\n ):\n await ctx.send(\n _(\n \"I do not have permission to rename that member. 
They may be higher than or \"\n \"equal to me in the role hierarchy.\"\n )\n )\n else:\n try:\n await user.edit(reason=get_audit_reason(ctx.author, None), nick=nickname)\n except discord.Forbidden:\n # Just in case we missed something in the permissions check above\n await ctx.send(_(\"I do not have permission to rename that member.\"))\n except discord.HTTPException as exc:\n if exc.status == 400: # BAD REQUEST\n await ctx.send(_(\"That nickname is invalid.\"))\n else:\n await ctx.send(_(\"An unexpected error has occured.\"))\n else:\n await ctx.send(_(\"Done.\"))\n\n def handle_custom(self, user):\n a = [c for c in user.activities if c.type == discord.ActivityType.custom]\n if not a:\n return None, discord.ActivityType.custom\n a = a[0]\n c_status = None\n if not a.name and not a.emoji:\n return None, discord.ActivityType.custom\n elif a.name and a.emoji:\n c_status = _(\"Custom: {emoji} {name}\").format(emoji=a.emoji, name=a.name)\n elif a.emoji:\n c_status = _(\"Custom: {emoji}\").format(emoji=a.emoji)\n elif a.name:\n c_status = _(\"Custom: {name}\").format(name=a.name)\n return c_status, discord.ActivityType.custom\n\n def handle_playing(self, user):\n p_acts = [c for c in user.activities if c.type == discord.ActivityType.playing]\n if not p_acts:\n return None, discord.ActivityType.playing\n p_act = p_acts[0]\n act = _(\"Playing: {name}\").format(name=p_act.name)\n return act, discord.ActivityType.playing\n\n def handle_streaming(self, user):\n s_acts = [c for c in user.activities if c.type == discord.ActivityType.streaming]\n if not s_acts:\n return None, discord.ActivityType.streaming\n s_act = s_acts[0]\n if isinstance(s_act, discord.Streaming):\n act = _(\"Streaming: [{name}{sep}{game}]({url})\").format(\n name=discord.utils.escape_markdown(s_act.name),\n sep=\" | \" if s_act.game else \"\",\n game=discord.utils.escape_markdown(s_act.game) if s_act.game else \"\",\n url=s_act.url,\n )\n else:\n act = _(\"Streaming: {name}\").format(name=s_act.name)\n return act, discord.ActivityType.streaming\n\n def handle_listening(self, user):\n l_acts = [c for c in user.activities if c.type == discord.ActivityType.listening]\n if not l_acts:\n return None, discord.ActivityType.listening\n l_act = l_acts[0]\n if isinstance(l_act, discord.Spotify):\n act = _(\"Listening: [{title}{sep}{artist}]({url})\").format(\n title=discord.utils.escape_markdown(l_act.title),\n sep=\" | \" if l_act.artist else \"\",\n artist=discord.utils.escape_markdown(l_act.artist) if l_act.artist else \"\",\n url=f\"https://open.spotify.com/track/{l_act.track_id}\",\n )\n else:\n act = _(\"Listening: {title}\").format(title=l_act.name)\n return act, discord.ActivityType.listening\n\n def handle_watching(self, user):\n w_acts = [c for c in user.activities if c.type == discord.ActivityType.watching]\n if not w_acts:\n return None, discord.ActivityType.watching\n w_act = w_acts[0]\n act = _(\"Watching: {name}\").format(name=w_act.name)\n return act, discord.ActivityType.watching\n\n def get_status_string(self, user):\n string = \"\"\n for a in [\n self.handle_custom(user),\n self.handle_playing(user),\n self.handle_listening(user),\n self.handle_streaming(user),\n self.handle_watching(user),\n ]:\n status_string, status_type = a\n if status_string is None:\n continue\n string += f\"{status_string}\\n\"\n return string\n\n @commands.command()\n @commands.guild_only()\n @commands.bot_has_permissions(embed_links=True)\n async def userinfo(self, ctx, *, user: discord.Member = None):\n \"\"\"Show information about a user.\n\n 
This includes fields for status, discord join date, server\n join date, voice state and previous names/nicknames.\n\n If the user has no roles, previous names or previous nicknames,\n these fields will be omitted.\n \"\"\"\n author = ctx.author\n guild = ctx.guild\n\n if not user:\n user = author\n\n # A special case for a special someone :^)\n special_date = datetime(2016, 1, 10, 6, 8, 4, 443000)\n is_special = user.id == 96130341705637888 and guild.id == 133049272517001216\n\n roles = user.roles[-1:0:-1]\n names, nicks = await self.get_names_and_nicks(user)\n\n joined_at = user.joined_at if not is_special else special_date\n since_created = (ctx.message.created_at - user.created_at).days\n if joined_at is not None:\n since_joined = (ctx.message.created_at - joined_at).days\n user_joined = joined_at.strftime(\"%d %b %Y %H:%M\")\n else:\n since_joined = \"?\"\n user_joined = _(\"Unknown\")\n user_created = user.created_at.strftime(\"%d %b %Y %H:%M\")\n voice_state = user.voice\n member_number = (\n sorted(guild.members, key=lambda m: m.joined_at or ctx.message.created_at).index(user)\n + 1\n )\n\n created_on = _(\"{}\\n({} days ago)\").format(user_created, since_created)\n joined_on = _(\"{}\\n({} days ago)\").format(user_joined, since_joined)\n\n if any(a.type is discord.ActivityType.streaming for a in user.activities):\n statusemoji = \"\\N{LARGE PURPLE CIRCLE}\"\n elif user.status.name == \"online\":\n statusemoji = \"\\N{LARGE GREEN CIRCLE}\"\n elif user.status.name == \"offline\":\n statusemoji = \"\\N{MEDIUM WHITE CIRCLE}\\N{VARIATION SELECTOR-16}\"\n elif user.status.name == \"dnd\":\n statusemoji = \"\\N{LARGE RED CIRCLE}\"\n elif user.status.name == \"idle\":\n statusemoji = \"\\N{LARGE ORANGE CIRCLE}\"\n activity = _(\"Chilling in {} status\").format(user.status)\n status_string = self.get_status_string(user)\n\n if roles:\n\n role_str = \", \".join([x.mention for x in roles])\n # 400 BAD REQUEST (error code: 50035): Invalid Form Body\n # In embed.fields.2.value: Must be 1024 or fewer in length.\n if len(role_str) > 1024:\n # Alternative string building time.\n # This is not the most optimal, but if you're hitting this, you are losing more time\n # to every single check running on users than the occasional user info invoke\n # We don't start by building this way, since the number of times we hit this should be\n # infinitesimally small compared to when we don't across all uses of Red.\n continuation_string = _(\n \"and {numeric_number} more roles not displayed due to embed limits.\"\n )\n available_length = 1024 - len(continuation_string) # do not attempt to tweak, i18n\n\n role_chunks = []\n remaining_roles = 0\n\n for r in roles:\n chunk = f\"{r.mention}, \"\n chunk_size = len(chunk)\n\n if chunk_size < available_length:\n available_length -= chunk_size\n role_chunks.append(chunk)\n else:\n remaining_roles += 1\n\n role_chunks.append(continuation_string.format(numeric_number=remaining_roles))\n\n role_str = \"\".join(role_chunks)\n\n else:\n role_str = None\n\n data = discord.Embed(description=status_string or activity, colour=user.colour)\n\n data.add_field(name=_(\"Joined Discord on\"), value=created_on)\n data.add_field(name=_(\"Joined this server on\"), value=joined_on)\n if role_str is not None:\n data.add_field(\n name=_(\"Roles\") if len(roles) > 1 else _(\"Role\"), value=role_str, inline=False\n )\n if names:\n # May need sanitizing later, but mentions do not ping in embeds currently\n val = filter_invites(\", \".join(names))\n data.add_field(\n name=_(\"Previous 
Names\") if len(names) > 1 else _(\"Previous Name\"),\n value=val,\n inline=False,\n )\n if nicks:\n # May need sanitizing later, but mentions do not ping in embeds currently\n val = filter_invites(\", \".join(nicks))\n data.add_field(\n name=_(\"Previous Nicknames\") if len(nicks) > 1 else _(\"Previous Nickname\"),\n value=val,\n inline=False,\n )\n if voice_state and voice_state.channel:\n data.add_field(\n name=_(\"Current voice channel\"),\n value=\"{0.mention} ID: {0.id}\".format(voice_state.channel),\n inline=False,\n )\n data.set_footer(text=_(\"Member #{} | User ID: {}\").format(member_number, user.id))\n\n name = str(user)\n name = \" ~ \".join((name, user.nick)) if user.nick else name\n name = filter_invites(name)\n\n avatar = user.avatar_url_as(static_format=\"png\")\n data.set_author(name=f\"{statusemoji} {name}\", url=avatar)\n data.set_thumbnail(url=avatar)\n\n await ctx.send(embed=data)\n\n @commands.command()\n async def names(self, ctx: commands.Context, *, user: discord.Member):\n \"\"\"Show previous names and nicknames of a user.\"\"\"\n names, nicks = await self.get_names_and_nicks(user)\n msg = \"\"\n if names:\n msg += _(\"**Past 20 names**:\")\n msg += \"\\n\"\n msg += \", \".join(names)\n if nicks:\n if msg:\n msg += \"\\n\\n\"\n msg += _(\"**Past 20 nicknames**:\")\n msg += \"\\n\"\n msg += \", \".join(nicks)\n if msg:\n msg = filter_various_mentions(msg)\n await ctx.send(msg)\n else:\n await ctx.send(_(\"That user doesn't have any recorded name or nickname change.\"))\n", "path": "redbot/cogs/mod/names.py"}], "after_files": [{"content": "from datetime import datetime\nfrom typing import cast\n\nimport discord\nfrom redbot.core import commands, i18n, checks\nfrom redbot.core.utils.common_filters import (\n filter_invites,\n filter_various_mentions,\n escape_spoilers_and_mass_mentions,\n)\nfrom redbot.core.utils.mod import get_audit_reason\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\n\n\nclass ModInfo(MixinMeta):\n \"\"\"\n Commands regarding names, userinfo, etc.\n \"\"\"\n\n async def get_names_and_nicks(self, user):\n names = await self.config.user(user).past_names()\n nicks = await self.config.member(user).past_nicks()\n if names:\n names = [escape_spoilers_and_mass_mentions(name) for name in names if name]\n if nicks:\n nicks = [escape_spoilers_and_mass_mentions(nick) for nick in nicks if nick]\n return names, nicks\n\n @commands.command()\n @commands.guild_only()\n @commands.bot_has_permissions(manage_nicknames=True)\n @checks.admin_or_permissions(manage_nicknames=True)\n async def rename(self, ctx: commands.Context, user: discord.Member, *, nickname: str = \"\"):\n \"\"\"Change a user's nickname.\n\n Leaving the nickname empty will remove it.\n \"\"\"\n nickname = nickname.strip()\n me = cast(discord.Member, ctx.me)\n if not nickname:\n nickname = None\n elif not 2 <= len(nickname) <= 32:\n await ctx.send(_(\"Nicknames must be between 2 and 32 characters long.\"))\n return\n if not (\n (me.guild_permissions.manage_nicknames or me.guild_permissions.administrator)\n and me.top_role > user.top_role\n and user != ctx.guild.owner\n ):\n await ctx.send(\n _(\n \"I do not have permission to rename that member. 
They may be higher than or \"\n \"equal to me in the role hierarchy.\"\n )\n )\n else:\n try:\n await user.edit(reason=get_audit_reason(ctx.author, None), nick=nickname)\n except discord.Forbidden:\n # Just in case we missed something in the permissions check above\n await ctx.send(_(\"I do not have permission to rename that member.\"))\n except discord.HTTPException as exc:\n if exc.status == 400: # BAD REQUEST\n await ctx.send(_(\"That nickname is invalid.\"))\n else:\n await ctx.send(_(\"An unexpected error has occured.\"))\n else:\n await ctx.send(_(\"Done.\"))\n\n def handle_custom(self, user):\n a = [c for c in user.activities if c.type == discord.ActivityType.custom]\n if not a:\n return None, discord.ActivityType.custom\n a = a[0]\n c_status = None\n if not a.name and not a.emoji:\n return None, discord.ActivityType.custom\n elif a.name and a.emoji:\n c_status = _(\"Custom: {emoji} {name}\").format(emoji=a.emoji, name=a.name)\n elif a.emoji:\n c_status = _(\"Custom: {emoji}\").format(emoji=a.emoji)\n elif a.name:\n c_status = _(\"Custom: {name}\").format(name=a.name)\n return c_status, discord.ActivityType.custom\n\n def handle_playing(self, user):\n p_acts = [c for c in user.activities if c.type == discord.ActivityType.playing]\n if not p_acts:\n return None, discord.ActivityType.playing\n p_act = p_acts[0]\n act = _(\"Playing: {name}\").format(name=p_act.name)\n return act, discord.ActivityType.playing\n\n def handle_streaming(self, user):\n s_acts = [c for c in user.activities if c.type == discord.ActivityType.streaming]\n if not s_acts:\n return None, discord.ActivityType.streaming\n s_act = s_acts[0]\n if isinstance(s_act, discord.Streaming):\n act = _(\"Streaming: [{name}{sep}{game}]({url})\").format(\n name=discord.utils.escape_markdown(s_act.name),\n sep=\" | \" if s_act.game else \"\",\n game=discord.utils.escape_markdown(s_act.game) if s_act.game else \"\",\n url=s_act.url,\n )\n else:\n act = _(\"Streaming: {name}\").format(name=s_act.name)\n return act, discord.ActivityType.streaming\n\n def handle_listening(self, user):\n l_acts = [c for c in user.activities if c.type == discord.ActivityType.listening]\n if not l_acts:\n return None, discord.ActivityType.listening\n l_act = l_acts[0]\n if isinstance(l_act, discord.Spotify):\n act = _(\"Listening: [{title}{sep}{artist}]({url})\").format(\n title=discord.utils.escape_markdown(l_act.title),\n sep=\" | \" if l_act.artist else \"\",\n artist=discord.utils.escape_markdown(l_act.artist) if l_act.artist else \"\",\n url=f\"https://open.spotify.com/track/{l_act.track_id}\",\n )\n else:\n act = _(\"Listening: {title}\").format(title=l_act.name)\n return act, discord.ActivityType.listening\n\n def handle_watching(self, user):\n w_acts = [c for c in user.activities if c.type == discord.ActivityType.watching]\n if not w_acts:\n return None, discord.ActivityType.watching\n w_act = w_acts[0]\n act = _(\"Watching: {name}\").format(name=w_act.name)\n return act, discord.ActivityType.watching\n\n def handle_competing(self, user):\n w_acts = [c for c in user.activities if c.type == discord.ActivityType.competing]\n if not w_acts:\n return None, discord.ActivityType.competing\n w_act = w_acts[0]\n act = _(\"Competing in: {competing}\").format(competing=w_act.name)\n return act, discord.ActivityType.competing\n\n def get_status_string(self, user):\n string = \"\"\n for a in [\n self.handle_custom(user),\n self.handle_playing(user),\n self.handle_listening(user),\n self.handle_streaming(user),\n self.handle_watching(user),\n 
self.handle_competing(user),\n ]:\n status_string, status_type = a\n if status_string is None:\n continue\n string += f\"{status_string}\\n\"\n return string\n\n @commands.command()\n @commands.guild_only()\n @commands.bot_has_permissions(embed_links=True)\n async def userinfo(self, ctx, *, user: discord.Member = None):\n \"\"\"Show information about a user.\n\n This includes fields for status, discord join date, server\n join date, voice state and previous names/nicknames.\n\n If the user has no roles, previous names or previous nicknames,\n these fields will be omitted.\n \"\"\"\n author = ctx.author\n guild = ctx.guild\n\n if not user:\n user = author\n\n # A special case for a special someone :^)\n special_date = datetime(2016, 1, 10, 6, 8, 4, 443000)\n is_special = user.id == 96130341705637888 and guild.id == 133049272517001216\n\n roles = user.roles[-1:0:-1]\n names, nicks = await self.get_names_and_nicks(user)\n\n joined_at = user.joined_at if not is_special else special_date\n since_created = (ctx.message.created_at - user.created_at).days\n if joined_at is not None:\n since_joined = (ctx.message.created_at - joined_at).days\n user_joined = joined_at.strftime(\"%d %b %Y %H:%M\")\n else:\n since_joined = \"?\"\n user_joined = _(\"Unknown\")\n user_created = user.created_at.strftime(\"%d %b %Y %H:%M\")\n voice_state = user.voice\n member_number = (\n sorted(guild.members, key=lambda m: m.joined_at or ctx.message.created_at).index(user)\n + 1\n )\n\n created_on = _(\"{}\\n({} days ago)\").format(user_created, since_created)\n joined_on = _(\"{}\\n({} days ago)\").format(user_joined, since_joined)\n\n if any(a.type is discord.ActivityType.streaming for a in user.activities):\n statusemoji = \"\\N{LARGE PURPLE CIRCLE}\"\n elif user.status.name == \"online\":\n statusemoji = \"\\N{LARGE GREEN CIRCLE}\"\n elif user.status.name == \"offline\":\n statusemoji = \"\\N{MEDIUM WHITE CIRCLE}\\N{VARIATION SELECTOR-16}\"\n elif user.status.name == \"dnd\":\n statusemoji = \"\\N{LARGE RED CIRCLE}\"\n elif user.status.name == \"idle\":\n statusemoji = \"\\N{LARGE ORANGE CIRCLE}\"\n activity = _(\"Chilling in {} status\").format(user.status)\n status_string = self.get_status_string(user)\n\n if roles:\n\n role_str = \", \".join([x.mention for x in roles])\n # 400 BAD REQUEST (error code: 50035): Invalid Form Body\n # In embed.fields.2.value: Must be 1024 or fewer in length.\n if len(role_str) > 1024:\n # Alternative string building time.\n # This is not the most optimal, but if you're hitting this, you are losing more time\n # to every single check running on users than the occasional user info invoke\n # We don't start by building this way, since the number of times we hit this should be\n # infinitesimally small compared to when we don't across all uses of Red.\n continuation_string = _(\n \"and {numeric_number} more roles not displayed due to embed limits.\"\n )\n available_length = 1024 - len(continuation_string) # do not attempt to tweak, i18n\n\n role_chunks = []\n remaining_roles = 0\n\n for r in roles:\n chunk = f\"{r.mention}, \"\n chunk_size = len(chunk)\n\n if chunk_size < available_length:\n available_length -= chunk_size\n role_chunks.append(chunk)\n else:\n remaining_roles += 1\n\n role_chunks.append(continuation_string.format(numeric_number=remaining_roles))\n\n role_str = \"\".join(role_chunks)\n\n else:\n role_str = None\n\n data = discord.Embed(description=status_string or activity, colour=user.colour)\n\n data.add_field(name=_(\"Joined Discord on\"), value=created_on)\n 
data.add_field(name=_(\"Joined this server on\"), value=joined_on)\n if role_str is not None:\n data.add_field(\n name=_(\"Roles\") if len(roles) > 1 else _(\"Role\"), value=role_str, inline=False\n )\n if names:\n # May need sanitizing later, but mentions do not ping in embeds currently\n val = filter_invites(\", \".join(names))\n data.add_field(\n name=_(\"Previous Names\") if len(names) > 1 else _(\"Previous Name\"),\n value=val,\n inline=False,\n )\n if nicks:\n # May need sanitizing later, but mentions do not ping in embeds currently\n val = filter_invites(\", \".join(nicks))\n data.add_field(\n name=_(\"Previous Nicknames\") if len(nicks) > 1 else _(\"Previous Nickname\"),\n value=val,\n inline=False,\n )\n if voice_state and voice_state.channel:\n data.add_field(\n name=_(\"Current voice channel\"),\n value=\"{0.mention} ID: {0.id}\".format(voice_state.channel),\n inline=False,\n )\n data.set_footer(text=_(\"Member #{} | User ID: {}\").format(member_number, user.id))\n\n name = str(user)\n name = \" ~ \".join((name, user.nick)) if user.nick else name\n name = filter_invites(name)\n\n avatar = user.avatar_url_as(static_format=\"png\")\n data.set_author(name=f\"{statusemoji} {name}\", url=avatar)\n data.set_thumbnail(url=avatar)\n\n await ctx.send(embed=data)\n\n @commands.command()\n async def names(self, ctx: commands.Context, *, user: discord.Member):\n \"\"\"Show previous names and nicknames of a user.\"\"\"\n names, nicks = await self.get_names_and_nicks(user)\n msg = \"\"\n if names:\n msg += _(\"**Past 20 names**:\")\n msg += \"\\n\"\n msg += \", \".join(names)\n if nicks:\n if msg:\n msg += \"\\n\\n\"\n msg += _(\"**Past 20 nicknames**:\")\n msg += \"\\n\"\n msg += \", \".join(nicks)\n if msg:\n msg = filter_various_mentions(msg)\n await ctx.send(msg)\n else:\n await ctx.send(_(\"That user doesn't have any recorded name or nickname change.\"))\n", "path": "redbot/cogs/mod/names.py"}]} | 4,003 | 266 |
gh_patches_debug_19278 | rasdani/github-patches | git_diff | Pycord-Development__pycord-645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redesign ApplicationContext.respond
### Summary
The ApplicationContext.respond property needs to be redesigned.
### What is the feature request for?
The core library
### The Problem
The `ApplicationContext.respond` property returns 2 different functions depending on `InteractionResponse.is_done()`. Both take different parameters and return different objects. This is bad practice because it can result in unexpected behavior. Also, type-hinting is kinda hard.
### The Ideal Solution
It might make sense to create a single function `ApplicationContext.respond` that merges all parameters. Depending on `InteractionResponse.is_done()`, it would ignore the parameters that only apply to the other function. The only remaining problem is the different return types. This would simplify the situation, but it isn't perfect yet; maybe someone has a better idea.
### The Current Solution
A property that returns 2 different functions depending on `InteractionResponse.is_done()`.
### Additional Context
_No response_
--- END ISSUE ---
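To make the "Ideal Solution" above concrete, the merged call could dispatch on `is_done()` and forward its arguments to whichever sender applies. The sketch below is only an illustration of that idea (the `merged_respond` name and the bare argument forwarding are assumptions; reconciling the two parameter sets and the differing return types is exactly the open problem the issue describes):

```python
from typing import Any


async def merged_respond(ctx: Any, *args: Any, **kwargs: Any) -> Any:
    """Sketch: send the initial interaction response, or a followup if one was already sent."""
    if not ctx.interaction.response.is_done():
        # Initial response path (InteractionResponse.send_message).
        return await ctx.interaction.response.send_message(*args, **kwargs)
    # Followup path (Webhook.send); the two branches still return different objects.
    return await ctx.followup.send(*args, **kwargs)
```

The file below shows the current property-based implementation that such a method would replace.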
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/commands/context.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25 from __future__ import annotations
26
27 from typing import TYPE_CHECKING, Optional, Union
28
29 import discord.abc
30
31 if TYPE_CHECKING:
32 import discord
33 from discord import Bot
34 from discord.state import ConnectionState
35
36 from .commands import ApplicationCommand, Option
37 from ..cog import Cog
38
39 from ..guild import Guild
40 from ..interactions import Interaction, InteractionResponse
41 from ..member import Member
42 from ..message import Message
43 from ..user import User
44 from ..utils import cached_property
45
46 __all__ = (
47 "ApplicationContext",
48 "AutocompleteContext"
49 )
50
51 class ApplicationContext(discord.abc.Messageable):
52 """Represents a Discord application command interaction context.
53
54 This class is not created manually and is instead passed to application
55 commands as the first parameter.
56
57 .. versionadded:: 2.0
58
59 Attributes
60 -----------
61 bot: :class:`.Bot`
62 The bot that the command belongs to.
63 interaction: :class:`.Interaction`
64 The interaction object that invoked the command.
65 command: :class:`.ApplicationCommand`
66 The command that this context belongs to.
67 """
68
69 def __init__(self, bot: Bot, interaction: Interaction):
70 self.bot = bot
71 self.interaction = interaction
72
73 # below attributes will be set after initialization
74 self.command: ApplicationCommand = None # type: ignore
75 self.focused: Option = None # type: ignore
76 self.value: str = None # type: ignore
77 self.options: dict = None # type: ignore
78
79 self._state: ConnectionState = self.interaction._state
80
81 async def _get_channel(self) -> discord.abc.Messageable:
82 return self.channel
83
84 @cached_property
85 def channel(self):
86 return self.interaction.channel
87
88 @cached_property
89 def channel_id(self) -> Optional[int]:
90 return self.interaction.channel_id
91
92 @cached_property
93 def guild(self) -> Optional[Guild]:
94 return self.interaction.guild
95
96 @cached_property
97 def guild_id(self) -> Optional[int]:
98 return self.interaction.guild_id
99
100 @cached_property
101 def locale(self) -> Optional[str]:
102 return self.interaction.locale
103
104 @cached_property
105 def guild_locale(self) -> Optional[str]:
106 return self.interaction.guild_locale
107
108 @cached_property
109 def me(self) -> Union[Member, User]:
110 return self.guild.me if self.guild is not None else self.bot.user
111
112 @cached_property
113 def message(self) -> Optional[Message]:
114 return self.interaction.message
115
116 @cached_property
117 def user(self) -> Optional[Union[Member, User]]:
118 return self.interaction.user
119
120 @cached_property
121 def author(self) -> Optional[Union[Member, User]]:
122 return self.user
123
124 @property
125 def voice_client(self):
126 if self.guild is None:
127 return None
128
129 return self.guild.voice_client
130
131 @cached_property
132 def response(self) -> InteractionResponse:
133 return self.interaction.response
134
135 @property
136 def respond(self):
137 return self.followup.send if self.response.is_done() else self.interaction.response.send_message
138
139 @property
140 def defer(self):
141 return self.interaction.response.defer
142
143 @property
144 def followup(self):
145 return self.interaction.followup
146
147 async def delete(self):
148 """Calls :attr:`~discord.commands.ApplicationContext.respond`.
149 If the response is done, then calls :attr:`~discord.commands.ApplicationContext.respond` first."""
150 if not self.response.is_done():
151 await self.defer()
152
153 return await self.interaction.delete_original_message()
154
155 @property
156 def edit(self):
157 return self.interaction.edit_original_message
158
159 @property
160 def cog(self) -> Optional[Cog]:
161 """Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist."""
162 if self.command is None:
163 return None
164
165 return self.command.cog
166
167
168 class AutocompleteContext:
169 """Represents context for a slash command's option autocomplete.
170
171 This class is not created manually and is instead passed to an Option's autocomplete callback.
172
173 .. versionadded:: 2.0
174
175 Attributes
176 -----------
177 bot: :class:`.Bot`
178 The bot that the command belongs to.
179 interaction: :class:`.Interaction`
180 The interaction object that invoked the autocomplete.
181 command: :class:`.ApplicationCommand`
182 The command that this context belongs to.
183 focused: :class:`.Option`
184 The option the user is currently typing.
185 value: :class:`.str`
186 The content of the focused option.
187 options :class:`.dict`
188 A name to value mapping of the options that the user has selected before this option.
189 """
190
191 __slots__ = ("bot", "interaction", "command", "focused", "value", "options")
192
193 def __init__(self, bot: Bot, interaction: Interaction) -> None:
194 self.bot = bot
195 self.interaction = interaction
196
197 self.command: ApplicationCommand = None # type: ignore
198 self.focused: Option = None # type: ignore
199 self.value: str = None # type: ignore
200 self.options: dict = None # type: ignore
201
202 @property
203 def cog(self) -> Optional[Cog]:
204 """Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist."""
205 if self.command is None:
206 return None
207
208 return self.command.cog
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/commands/context.py b/discord/commands/context.py
--- a/discord/commands/context.py
+++ b/discord/commands/context.py
@@ -134,7 +134,26 @@
@property
def respond(self):
- return self.followup.send if self.response.is_done() else self.interaction.response.send_message
+ if not self.response.is_done():
+ return self.interaction.response.send_message
+ else:
+ raise RuntimeError(f"Interaction was already issued a response. Try using {type(self).__name__}.send_followup() instead.")
+
+ @property
+ async def send_response(self) -> Callable[..., Union[Interaction, Webhook]]:
+ """Callable[..., Union[:class:`~.Interaction`, :class:`~.Webhook`]]: Sends either a response
+ or a followup response depending if the interaction has been responded to yet or not."""
+ if not self.response.is_done():
+ return self.interaction.response.send_message # self.response
+ else:
+ return self.followup.send # self.send_followup
+
+ @property
+ def send_followup(self):
+ if self.response.is_done():
+ return self.followup.send
+ else:
+ raise RuntimeError(f"Interaction was not yet issued a response. Try using {type(self).__name__}.respond() first.")
@property
def defer(self):
| {"golden_diff": "diff --git a/discord/commands/context.py b/discord/commands/context.py\n--- a/discord/commands/context.py\n+++ b/discord/commands/context.py\n@@ -134,7 +134,26 @@\n \n @property\n def respond(self):\n- return self.followup.send if self.response.is_done() else self.interaction.response.send_message\n+ if not self.response.is_done():\n+ return self.interaction.response.send_message\n+ else:\n+ raise RuntimeError(f\"Interaction was already issued a response. Try using {type(self).__name__}.send_followup() instead.\")\n+\n+ @property\n+ async def send_response(self) -> Callable[..., Union[Interaction, Webhook]]:\n+ \"\"\"Callable[..., Union[:class:`~.Interaction`, :class:`~.Webhook`]]: Sends either a response\n+ or a followup response depending if the interaction has been responded to yet or not.\"\"\"\n+ if not self.response.is_done():\n+ return self.interaction.response.send_message # self.response\n+ else:\n+ return self.followup.send # self.send_followup\n+\n+ @property\n+ def send_followup(self):\n+ if self.response.is_done():\n+ return self.followup.send\n+ else:\n+ raise RuntimeError(f\"Interaction was not yet issued a response. Try using {type(self).__name__}.respond() first.\")\n \n @property\n def defer(self):\n", "issue": "Redesign ApplicationContext.respond\n### Summary\n\nThe ApplicationContext.respond property needs to be redesigned.\n\n### What is the feature request for?\n\nThe core library\n\n### The Problem\n\nThe `ApplicationContext.respond` property returns 2 different functions depending on `InteractionResponse.is_done()`. Both take different parameters and return different objects. This is bad practice because it can result in unexpected behavior. Also, type-hinting is kinda hard. \n\n### The Ideal Solution\n\nIt might make sense to create a function `ApplicationContext.respond` that merges all parameters. Depending on `InteractionResponse.is_done()` it ignores the parameter of the other function. The only remaining problem is the different return types. This would simplify the situation, but isn't perfect yet. Maybe someone got a better idea.\n\n### The Current Solution\n\nA property that returns 2 different functions depending on `InteractionResponse.is_done()`.\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional, Union\n\nimport discord.abc\n\nif TYPE_CHECKING:\n import discord\n from discord import Bot\n from discord.state import ConnectionState\n\n from .commands import ApplicationCommand, Option\n from ..cog import Cog\n\nfrom ..guild import Guild\nfrom ..interactions import Interaction, InteractionResponse\nfrom ..member import Member\nfrom ..message import Message\nfrom ..user import User\nfrom ..utils import cached_property\n\n__all__ = (\n \"ApplicationContext\",\n \"AutocompleteContext\"\n)\n\nclass ApplicationContext(discord.abc.Messageable):\n \"\"\"Represents a Discord application command interaction context.\n\n This class is not created manually and is instead passed to application\n commands as the first parameter.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to.\n interaction: :class:`.Interaction`\n The interaction object that invoked the command.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n \"\"\"\n\n def __init__(self, bot: Bot, interaction: Interaction):\n self.bot = bot\n self.interaction = interaction\n\n # below attributes will be set after initialization\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n self._state: ConnectionState = self.interaction._state\n\n async def _get_channel(self) -> discord.abc.Messageable:\n return self.channel\n\n @cached_property\n def channel(self):\n return self.interaction.channel\n\n @cached_property\n def channel_id(self) -> Optional[int]:\n return self.interaction.channel_id\n\n @cached_property\n def guild(self) -> Optional[Guild]:\n return self.interaction.guild\n\n @cached_property\n def guild_id(self) -> Optional[int]:\n return self.interaction.guild_id\n\n @cached_property\n def locale(self) -> Optional[str]:\n return self.interaction.locale\n\n @cached_property\n def guild_locale(self) -> Optional[str]:\n return self.interaction.guild_locale\n\n @cached_property\n def me(self) -> Union[Member, User]:\n return self.guild.me if self.guild is not None else self.bot.user\n\n @cached_property\n def message(self) -> Optional[Message]:\n return self.interaction.message\n\n @cached_property\n def user(self) -> Optional[Union[Member, User]]:\n return self.interaction.user\n\n @cached_property\n def author(self) -> Optional[Union[Member, User]]:\n return self.user\n\n @property\n def voice_client(self):\n if self.guild is None:\n return None\n \n return self.guild.voice_client\n\n @cached_property\n def response(self) -> InteractionResponse:\n return self.interaction.response\n\n @property\n def respond(self):\n return self.followup.send if self.response.is_done() else self.interaction.response.send_message\n\n @property\n def defer(self):\n return self.interaction.response.defer\n\n @property\n def followup(self):\n return self.interaction.followup\n\n async def delete(self):\n \"\"\"Calls :attr:`~discord.commands.ApplicationContext.respond`.\n If the response is done, then calls :attr:`~discord.commands.ApplicationContext.respond` first.\"\"\"\n if not 
self.response.is_done():\n await self.defer()\n\n return await self.interaction.delete_original_message()\n\n @property\n def edit(self):\n return self.interaction.edit_original_message\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n\n\nclass AutocompleteContext:\n \"\"\"Represents context for a slash command's option autocomplete.\n\n This class is not created manually and is instead passed to an Option's autocomplete callback.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to. \n interaction: :class:`.Interaction`\n The interaction object that invoked the autocomplete.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n focused: :class:`.Option`\n The option the user is currently typing.\n value: :class:`.str`\n The content of the focused option.\n options :class:`.dict`\n A name to value mapping of the options that the user has selected before this option.\n \"\"\"\n\n __slots__ = (\"bot\", \"interaction\", \"command\", \"focused\", \"value\", \"options\")\n \n def __init__(self, bot: Bot, interaction: Interaction) -> None:\n self.bot = bot\n self.interaction = interaction\n\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n", "path": "discord/commands/context.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional, Union\n\nimport discord.abc\n\nif TYPE_CHECKING:\n import discord\n from discord import Bot\n from discord.state import ConnectionState\n\n from .commands import ApplicationCommand, Option\n from ..cog import Cog\n\nfrom ..guild import Guild\nfrom ..interactions import Interaction, InteractionResponse\nfrom ..member import Member\nfrom ..message import Message\nfrom ..user import User\nfrom ..utils import cached_property\n\n__all__ = (\n \"ApplicationContext\",\n \"AutocompleteContext\"\n)\n\nclass ApplicationContext(discord.abc.Messageable):\n \"\"\"Represents a Discord application command interaction context.\n\n This class is not created manually and is instead passed to application\n commands as the first parameter.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to.\n interaction: :class:`.Interaction`\n The interaction object that invoked the command.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n \"\"\"\n\n def __init__(self, bot: Bot, interaction: Interaction):\n self.bot = bot\n self.interaction = interaction\n\n # below attributes will be set after initialization\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n self._state: ConnectionState = self.interaction._state\n\n async def _get_channel(self) -> discord.abc.Messageable:\n return self.channel\n\n @cached_property\n def channel(self):\n return self.interaction.channel\n\n @cached_property\n def channel_id(self) -> Optional[int]:\n return self.interaction.channel_id\n\n @cached_property\n def guild(self) -> Optional[Guild]:\n return self.interaction.guild\n\n @cached_property\n def guild_id(self) -> Optional[int]:\n return self.interaction.guild_id\n\n @cached_property\n def locale(self) -> Optional[str]:\n return self.interaction.locale\n\n @cached_property\n def guild_locale(self) -> Optional[str]:\n return self.interaction.guild_locale\n\n @cached_property\n def me(self) -> Union[Member, User]:\n return self.guild.me if self.guild is not None else self.bot.user\n\n @cached_property\n def message(self) -> Optional[Message]:\n return self.interaction.message\n\n @cached_property\n def user(self) -> Optional[Union[Member, User]]:\n return self.interaction.user\n\n @cached_property\n def author(self) -> Optional[Union[Member, User]]:\n return self.user\n\n @property\n def voice_client(self):\n if self.guild is None:\n return None\n \n return self.guild.voice_client\n\n @cached_property\n def response(self) -> InteractionResponse:\n return self.interaction.response\n\n @property\n def respond(self):\n if not self.response.is_done():\n return self.interaction.response.send_message\n else:\n raise RuntimeError(f\"Interaction was already issued a response. 
Try using {type(self).__name__}.send_followup() instead.\")\n\n @property\n async def send_response(self) -> Callable[..., Union[Interaction, Webhook]]:\n \"\"\"Callable[..., Union[:class:`~.Interaction`, :class:`~.Webhook`]]: Sends either a response\n or a followup response depending if the interaction has been responded to yet or not.\"\"\"\n if not self.response.is_done():\n return self.interaction.response.send_message # self.response\n else:\n return self.followup.send # self.send_followup\n\n @property\n def send_followup(self):\n if self.response.is_done():\n return self.followup.send\n else:\n raise RuntimeError(f\"Interaction was not yet issued a response. Try using {type(self).__name__}.respond() first.\")\n\n @property\n def defer(self):\n return self.interaction.response.defer\n\n @property\n def followup(self):\n return self.interaction.followup\n\n async def delete(self):\n \"\"\"Calls :attr:`~discord.commands.ApplicationContext.respond`.\n If the response is done, then calls :attr:`~discord.commands.ApplicationContext.respond` first.\"\"\"\n if not self.response.is_done():\n await self.defer()\n\n return await self.interaction.delete_original_message()\n\n @property\n def edit(self):\n return self.interaction.edit_original_message\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n\n\nclass AutocompleteContext:\n \"\"\"Represents context for a slash command's option autocomplete.\n\n This class is not created manually and is instead passed to an Option's autocomplete callback.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to. \n interaction: :class:`.Interaction`\n The interaction object that invoked the autocomplete.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n focused: :class:`.Option`\n The option the user is currently typing.\n value: :class:`.str`\n The content of the focused option.\n options :class:`.dict`\n A name to value mapping of the options that the user has selected before this option.\n \"\"\"\n\n __slots__ = (\"bot\", \"interaction\", \"command\", \"focused\", \"value\", \"options\")\n \n def __init__(self, bot: Bot, interaction: Interaction) -> None:\n self.bot = bot\n self.interaction = interaction\n\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n", "path": "discord/commands/context.py"}]} | 2,440 | 314 |
gh_patches_debug_19915 | rasdani/github-patches | git_diff | apache__airflow-15247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tasks in an infinite slots pool are never scheduled
**Apache Airflow version**: v2.0.0 and up
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): not tested with K8
**Environment**:
all
**What happened**:
Execute the unit test included below, or create an infinite pool (`-1` slots) and tasks that should be executed in that pool.
```
INFO airflow.jobs.scheduler_job.SchedulerJob:scheduler_job.py:991 Not scheduling since there are -1 open slots in pool test_scheduler_verify_infinite_pool
```
**What you expected to happen**:
To schedule tasks, or to drop support for infinite slots pools?
**How to reproduce it**:
easiest one is this unit test:
```
def test_scheduler_verify_infinite_pool(self):
"""
Test that TIs are still scheduled if we only have one infinite pool.
"""
dag = DAG(dag_id='test_scheduler_verify_infinite_pool', start_date=DEFAULT_DATE)
BashOperator(
task_id='test_scheduler_verify_infinite_pool_t0',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_infinite_pool',
bash_command='echo hi',
)
dagbag = DagBag(
dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
read_dags_from_db=True,
)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_infinite_pool', slots=-1)
session.add(pool)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob(executor=self.null_exec)
scheduler.processor_agent = mock.MagicMock()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, {}, session)
task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
# Let's make sure we don't end up with a `max_tis` == 0
assert len(task_instances_list) >= 1
```
**Anything else we need to know**:
Overall I'm not sure whether it's worth fixing in those various spots:
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L908
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L971
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L988
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1041
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1056
Or whether to restrict `-1` ( infinite ) slots in pools:
https://github.com/bperson/airflow/blob/master/airflow/models/pool.py#L49
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/models/pool.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 from typing import Dict, Iterable, Optional, Tuple
20
21 from sqlalchemy import Column, Integer, String, Text, func
22 from sqlalchemy.orm.session import Session
23
24 from airflow.exceptions import AirflowException
25 from airflow.models.base import Base
26 from airflow.ti_deps.dependencies_states import EXECUTION_STATES
27 from airflow.typing_compat import TypedDict
28 from airflow.utils.session import provide_session
29 from airflow.utils.sqlalchemy import nowait, with_row_locks
30 from airflow.utils.state import State
31
32
33 class PoolStats(TypedDict):
34 """Dictionary containing Pool Stats"""
35
36 total: int
37 running: int
38 queued: int
39 open: int
40
41
42 class Pool(Base):
43 """the class to get Pool info."""
44
45 __tablename__ = "slot_pool"
46
47 id = Column(Integer, primary_key=True)
48 pool = Column(String(256), unique=True)
49 # -1 for infinite
50 slots = Column(Integer, default=0)
51 description = Column(Text)
52
53 DEFAULT_POOL_NAME = 'default_pool'
54
55 def __repr__(self):
56 return str(self.pool) # pylint: disable=E0012
57
58 @staticmethod
59 @provide_session
60 def get_pool(pool_name, session: Session = None):
61 """
62 Get the Pool with specific pool name from the Pools.
63
64 :param pool_name: The pool name of the Pool to get.
65 :param session: SQLAlchemy ORM Session
66 :return: the pool object
67 """
68 return session.query(Pool).filter(Pool.pool == pool_name).first()
69
70 @staticmethod
71 @provide_session
72 def get_default_pool(session: Session = None):
73 """
74 Get the Pool of the default_pool from the Pools.
75
76 :param session: SQLAlchemy ORM Session
77 :return: the pool object
78 """
79 return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)
80
81 @staticmethod
82 @provide_session
83 def slots_stats(
84 *,
85 lock_rows: bool = False,
86 session: Session = None,
87 ) -> Dict[str, PoolStats]:
88 """
89 Get Pool stats (Number of Running, Queued, Open & Total tasks)
90
91 If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a
92 non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an
93 OperationalError.
94
95 :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns
96 :param session: SQLAlchemy ORM Session
97 """
98 from airflow.models.taskinstance import TaskInstance # Avoid circular import
99
100 pools: Dict[str, PoolStats] = {}
101
102 query = session.query(Pool.pool, Pool.slots)
103
104 if lock_rows:
105 query = with_row_locks(query, session=session, **nowait(session))
106
107 pool_rows: Iterable[Tuple[str, int]] = query.all()
108 for (pool_name, total_slots) in pool_rows:
109 pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
110
111 state_count_by_pool = (
112 session.query(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))
113 .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
114 .group_by(TaskInstance.pool, TaskInstance.state)
115 ).all()
116
117 # calculate queued and running metrics
118 count: int
119 for (pool_name, state, count) in state_count_by_pool:
120 stats_dict: Optional[PoolStats] = pools.get(pool_name)
121 if not stats_dict:
122 continue
123 # TypedDict key must be a string literal, so we use if-statements to set value
124 if state == "running":
125 stats_dict["running"] = count
126 elif state == "queued":
127 stats_dict["queued"] = count
128 else:
129 raise AirflowException(f"Unexpected state. Expected values: {EXECUTION_STATES}.")
130
131 # calculate open metric
132 for pool_name, stats_dict in pools.items():
133 if stats_dict["total"] == -1:
134 # -1 means infinite
135 stats_dict["open"] = -1
136 else:
137 stats_dict["open"] = stats_dict["total"] - stats_dict["running"] - stats_dict["queued"]
138
139 return pools
140
141 def to_json(self):
142 """
143 Get the Pool in a json structure
144
145 :return: the pool object in json format
146 """
147 return {
148 'id': self.id,
149 'pool': self.pool,
150 'slots': self.slots,
151 'description': self.description,
152 }
153
154 @provide_session
155 def occupied_slots(self, session: Session):
156 """
157 Get the number of slots used by running/queued tasks at the moment.
158
159 :param session: SQLAlchemy ORM Session
160 :return: the used number of slots
161 """
162 from airflow.models.taskinstance import TaskInstance # Avoid circular import
163
164 return int(
165 session.query(func.sum(TaskInstance.pool_slots))
166 .filter(TaskInstance.pool == self.pool)
167 .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
168 .scalar()
169 or 0
170 )
171
172 @provide_session
173 def running_slots(self, session: Session):
174 """
175 Get the number of slots used by running tasks at the moment.
176
177 :param session: SQLAlchemy ORM Session
178 :return: the used number of slots
179 """
180 from airflow.models.taskinstance import TaskInstance # Avoid circular import
181
182 return int(
183 session.query(func.sum(TaskInstance.pool_slots))
184 .filter(TaskInstance.pool == self.pool)
185 .filter(TaskInstance.state == State.RUNNING)
186 .scalar()
187 or 0
188 )
189
190 @provide_session
191 def queued_slots(self, session: Session):
192 """
193 Get the number of slots used by queued tasks at the moment.
194
195 :param session: SQLAlchemy ORM Session
196 :return: the used number of slots
197 """
198 from airflow.models.taskinstance import TaskInstance # Avoid circular import
199
200 return int(
201 session.query(func.sum(TaskInstance.pool_slots))
202 .filter(TaskInstance.pool == self.pool)
203 .filter(TaskInstance.state == State.QUEUED)
204 .scalar()
205 or 0
206 )
207
208 @provide_session
209 def open_slots(self, session: Session) -> float:
210 """
211 Get the number of slots open at the moment.
212
213 :param session: SQLAlchemy ORM Session
214 :return: the number of slots
215 """
216 if self.slots == -1:
217 return float('inf')
218 else:
219 return self.slots - self.occupied_slots(session)
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/models/pool.py b/airflow/models/pool.py
--- a/airflow/models/pool.py
+++ b/airflow/models/pool.py
@@ -106,6 +106,8 @@
pool_rows: Iterable[Tuple[str, int]] = query.all()
for (pool_name, total_slots) in pool_rows:
+ if total_slots == -1:
+ total_slots = float('inf') # type: ignore
pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
state_count_by_pool = (
@@ -115,8 +117,10 @@
).all()
# calculate queued and running metrics
- count: int
for (pool_name, state, count) in state_count_by_pool:
+ # Some databases return decimal.Decimal here.
+ count = int(count)
+
stats_dict: Optional[PoolStats] = pools.get(pool_name)
if not stats_dict:
continue
| {"golden_diff": "diff --git a/airflow/models/pool.py b/airflow/models/pool.py\n--- a/airflow/models/pool.py\n+++ b/airflow/models/pool.py\n@@ -106,6 +106,8 @@\n \n pool_rows: Iterable[Tuple[str, int]] = query.all()\n for (pool_name, total_slots) in pool_rows:\n+ if total_slots == -1:\n+ total_slots = float('inf') # type: ignore\n pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)\n \n state_count_by_pool = (\n@@ -115,8 +117,10 @@\n ).all()\n \n # calculate queued and running metrics\n- count: int\n for (pool_name, state, count) in state_count_by_pool:\n+ # Some databases return decimal.Decimal here.\n+ count = int(count)\n+\n stats_dict: Optional[PoolStats] = pools.get(pool_name)\n if not stats_dict:\n continue\n", "issue": "Tasks in an infinite slots pool are never scheduled\n**Apache Airflow version**: v2.0.0 and up\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): not tested with K8\r\n**Environment**:\r\nall\r\n\r\n**What happened**:\r\n\r\nExecuting the unit test included below, or create an infinite pool ( `-1` slots ) and tasks that should be executed in that pool.\r\n```\r\nINFO airflow.jobs.scheduler_job.SchedulerJob:scheduler_job.py:991 Not scheduling since there are -1 open slots in pool test_scheduler_verify_infinite_pool\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nTo schedule tasks, or to drop support for infinite slots pools?\r\n\r\n**How to reproduce it**:\r\neasiest one is this unit test:\r\n```\r\ndef test_scheduler_verify_infinite_pool(self):\r\n \"\"\"\r\n Test that TIs are still scheduled if we only have one infinite pool.\r\n \"\"\"\r\n dag = DAG(dag_id='test_scheduler_verify_infinite_pool', start_date=DEFAULT_DATE)\r\n BashOperator(\r\n task_id='test_scheduler_verify_infinite_pool_t0',\r\n dag=dag,\r\n owner='airflow',\r\n pool='test_scheduler_verify_infinite_pool',\r\n bash_command='echo hi',\r\n )\r\n\r\n dagbag = DagBag(\r\n dag_folder=os.path.join(settings.DAGS_FOLDER, \"no_dags.py\"),\r\n include_examples=False,\r\n read_dags_from_db=True,\r\n )\r\n dagbag.bag_dag(dag=dag, root_dag=dag)\r\n dagbag.sync_to_db()\r\n\r\n session = settings.Session()\r\n pool = Pool(pool='test_scheduler_verify_infinite_pool', slots=-1)\r\n session.add(pool)\r\n session.commit()\r\n\r\n dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))\r\n\r\n scheduler = SchedulerJob(executor=self.null_exec)\r\n scheduler.processor_agent = mock.MagicMock()\r\n\r\n dr = dag.create_dagrun(\r\n run_type=DagRunType.SCHEDULED,\r\n execution_date=DEFAULT_DATE,\r\n state=State.RUNNING,\r\n )\r\n scheduler._schedule_dag_run(dr, {}, session)\r\n\r\n task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)\r\n\r\n # Let's make sure we don't end up with a `max_tis` == 0\r\n assert len(task_instances_list) >= 1\r\n```\r\n\r\n**Anything else we need to know**:\r\n\r\nOverall I'm not sure whether it's worth fixing in those various spots:\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L908\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L971\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L988\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1041\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1056\r\n\r\nOr whether to restrict `-1` ( infinite ) slots in pools:\r\nhttps://github.com/bperson/airflow/blob/master/airflow/models/pool.py#L49\n", 
"before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import Dict, Iterable, Optional, Tuple\n\nfrom sqlalchemy import Column, Integer, String, Text, func\nfrom sqlalchemy.orm.session import Session\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.models.base import Base\nfrom airflow.ti_deps.dependencies_states import EXECUTION_STATES\nfrom airflow.typing_compat import TypedDict\nfrom airflow.utils.session import provide_session\nfrom airflow.utils.sqlalchemy import nowait, with_row_locks\nfrom airflow.utils.state import State\n\n\nclass PoolStats(TypedDict):\n \"\"\"Dictionary containing Pool Stats\"\"\"\n\n total: int\n running: int\n queued: int\n open: int\n\n\nclass Pool(Base):\n \"\"\"the class to get Pool info.\"\"\"\n\n __tablename__ = \"slot_pool\"\n\n id = Column(Integer, primary_key=True)\n pool = Column(String(256), unique=True)\n # -1 for infinite\n slots = Column(Integer, default=0)\n description = Column(Text)\n\n DEFAULT_POOL_NAME = 'default_pool'\n\n def __repr__(self):\n return str(self.pool) # pylint: disable=E0012\n\n @staticmethod\n @provide_session\n def get_pool(pool_name, session: Session = None):\n \"\"\"\n Get the Pool with specific pool name from the Pools.\n\n :param pool_name: The pool name of the Pool to get.\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return session.query(Pool).filter(Pool.pool == pool_name).first()\n\n @staticmethod\n @provide_session\n def get_default_pool(session: Session = None):\n \"\"\"\n Get the Pool of the default_pool from the Pools.\n\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)\n\n @staticmethod\n @provide_session\n def slots_stats(\n *,\n lock_rows: bool = False,\n session: Session = None,\n ) -> Dict[str, PoolStats]:\n \"\"\"\n Get Pool stats (Number of Running, Queued, Open & Total tasks)\n\n If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a\n non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an\n OperationalError.\n\n :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns\n :param session: SQLAlchemy ORM Session\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n pools: Dict[str, PoolStats] = {}\n\n query = session.query(Pool.pool, Pool.slots)\n\n if lock_rows:\n query = with_row_locks(query, session=session, **nowait(session))\n\n pool_rows: Iterable[Tuple[str, int]] = query.all()\n for (pool_name, total_slots) in pool_rows:\n pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)\n\n state_count_by_pool = (\n 
session.query(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .group_by(TaskInstance.pool, TaskInstance.state)\n ).all()\n\n # calculate queued and running metrics\n count: int\n for (pool_name, state, count) in state_count_by_pool:\n stats_dict: Optional[PoolStats] = pools.get(pool_name)\n if not stats_dict:\n continue\n # TypedDict key must be a string literal, so we use if-statements to set value\n if state == \"running\":\n stats_dict[\"running\"] = count\n elif state == \"queued\":\n stats_dict[\"queued\"] = count\n else:\n raise AirflowException(f\"Unexpected state. Expected values: {EXECUTION_STATES}.\")\n\n # calculate open metric\n for pool_name, stats_dict in pools.items():\n if stats_dict[\"total\"] == -1:\n # -1 means infinite\n stats_dict[\"open\"] = -1\n else:\n stats_dict[\"open\"] = stats_dict[\"total\"] - stats_dict[\"running\"] - stats_dict[\"queued\"]\n\n return pools\n\n def to_json(self):\n \"\"\"\n Get the Pool in a json structure\n\n :return: the pool object in json format\n \"\"\"\n return {\n 'id': self.id,\n 'pool': self.pool,\n 'slots': self.slots,\n 'description': self.description,\n }\n\n @provide_session\n def occupied_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running/queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .scalar()\n or 0\n )\n\n @provide_session\n def running_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.RUNNING)\n .scalar()\n or 0\n )\n\n @provide_session\n def queued_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.QUEUED)\n .scalar()\n or 0\n )\n\n @provide_session\n def open_slots(self, session: Session) -> float:\n \"\"\"\n Get the number of slots open at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the number of slots\n \"\"\"\n if self.slots == -1:\n return float('inf')\n else:\n return self.slots - self.occupied_slots(session)\n", "path": "airflow/models/pool.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import Dict, Iterable, Optional, Tuple\n\nfrom sqlalchemy import Column, Integer, String, Text, func\nfrom sqlalchemy.orm.session import Session\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.models.base import Base\nfrom airflow.ti_deps.dependencies_states import EXECUTION_STATES\nfrom airflow.typing_compat import TypedDict\nfrom airflow.utils.session import provide_session\nfrom airflow.utils.sqlalchemy import nowait, with_row_locks\nfrom airflow.utils.state import State\n\n\nclass PoolStats(TypedDict):\n \"\"\"Dictionary containing Pool Stats\"\"\"\n\n total: int\n running: int\n queued: int\n open: int\n\n\nclass Pool(Base):\n \"\"\"the class to get Pool info.\"\"\"\n\n __tablename__ = \"slot_pool\"\n\n id = Column(Integer, primary_key=True)\n pool = Column(String(256), unique=True)\n # -1 for infinite\n slots = Column(Integer, default=0)\n description = Column(Text)\n\n DEFAULT_POOL_NAME = 'default_pool'\n\n def __repr__(self):\n return str(self.pool) # pylint: disable=E0012\n\n @staticmethod\n @provide_session\n def get_pool(pool_name, session: Session = None):\n \"\"\"\n Get the Pool with specific pool name from the Pools.\n\n :param pool_name: The pool name of the Pool to get.\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return session.query(Pool).filter(Pool.pool == pool_name).first()\n\n @staticmethod\n @provide_session\n def get_default_pool(session: Session = None):\n \"\"\"\n Get the Pool of the default_pool from the Pools.\n\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)\n\n @staticmethod\n @provide_session\n def slots_stats(\n *,\n lock_rows: bool = False,\n session: Session = None,\n ) -> Dict[str, PoolStats]:\n \"\"\"\n Get Pool stats (Number of Running, Queued, Open & Total tasks)\n\n If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a\n non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an\n OperationalError.\n\n :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns\n :param session: SQLAlchemy ORM Session\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n pools: Dict[str, PoolStats] = {}\n\n query = session.query(Pool.pool, Pool.slots)\n\n if lock_rows:\n query = with_row_locks(query, session=session, **nowait(session))\n\n pool_rows: Iterable[Tuple[str, int]] = query.all()\n for (pool_name, total_slots) in pool_rows:\n if total_slots == -1:\n total_slots = float('inf') # type: ignore\n pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)\n\n state_count_by_pool = (\n session.query(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .group_by(TaskInstance.pool, TaskInstance.state)\n ).all()\n\n # calculate queued and running metrics\n for (pool_name, state, count) in state_count_by_pool:\n # Some databases return decimal.Decimal 
here.\n count = int(count)\n\n stats_dict: Optional[PoolStats] = pools.get(pool_name)\n if not stats_dict:\n continue\n # TypedDict key must be a string literal, so we use if-statements to set value\n if state == \"running\":\n stats_dict[\"running\"] = count\n elif state == \"queued\":\n stats_dict[\"queued\"] = count\n else:\n raise AirflowException(f\"Unexpected state. Expected values: {EXECUTION_STATES}.\")\n\n # calculate open metric\n for pool_name, stats_dict in pools.items():\n if stats_dict[\"total\"] == -1:\n # -1 means infinite\n stats_dict[\"open\"] = -1\n else:\n stats_dict[\"open\"] = stats_dict[\"total\"] - stats_dict[\"running\"] - stats_dict[\"queued\"]\n\n return pools\n\n def to_json(self):\n \"\"\"\n Get the Pool in a json structure\n\n :return: the pool object in json format\n \"\"\"\n return {\n 'id': self.id,\n 'pool': self.pool,\n 'slots': self.slots,\n 'description': self.description,\n }\n\n @provide_session\n def occupied_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running/queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .scalar()\n or 0\n )\n\n @provide_session\n def running_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.RUNNING)\n .scalar()\n or 0\n )\n\n @provide_session\n def queued_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.QUEUED)\n .scalar()\n or 0\n )\n\n @provide_session\n def open_slots(self, session: Session) -> float:\n \"\"\"\n Get the number of slots open at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the number of slots\n \"\"\"\n if self.slots == -1:\n return float('inf')\n else:\n return self.slots - self.occupied_slots(session)\n", "path": "airflow/models/pool.py"}]} | 3,118 | 233 |
gh_patches_debug_6036 | rasdani/github-patches | git_diff | sunpy__sunpy-2824 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
show_colormaps() is not big enough to show the colormap names
See attached picture. This affects the gallery too - see http://docs.sunpy.org/en/stable/generated/gallery/plotting/sunpy_colormaps_reference.html#sphx-glr-generated-gallery-plotting-sunpy-colormaps-reference-py

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/cm/cm.py`
Content:
```
1 """
2 This module provides a set of colormaps specific for solar data.
3 """
4 from __future__ import absolute_import, division, print_function
5
6 from copy import deepcopy
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10 import matplotlib.cm as mplcm
11
12 from sunpy.cm import color_tables as ct
13 from sunpy.util import deprecated
14
15 __all__ = ['get_cmap', 'show_colormaps', 'cmlist']
16
17 sdoaia94 = ct.aia_color_table(94)
18 sdoaia131 = ct.aia_color_table(131)
19 sdoaia171 = ct.aia_color_table(171)
20 sdoaia193 = ct.aia_color_table(193)
21 sdoaia211 = ct.aia_color_table(211)
22 sdoaia304 = ct.aia_color_table(304)
23 sdoaia335 = ct.aia_color_table(335)
24 sdoaia1600 = ct.aia_color_table(1600)
25 sdoaia1700 = ct.aia_color_table(1700)
26 sdoaia4500 = ct.aia_color_table(4500)
27
28 sohoeit171 = ct.eit_color_table(171)
29 sohoeit195 = ct.eit_color_table(195)
30 sohoeit284 = ct.eit_color_table(284)
31 sohoeit304 = ct.eit_color_table(304)
32
33 # The color tables below returns one of the fundamental color tables for SOHO
34 # LASCO images. These are not the same as those used in SSWIDL. This is
35 # because the SSWIDL color scaling for LASCO level 0.5 and 1.0 is highly
36 # compressed and does not display the data well.
37 soholasco2 = deepcopy(mplcm.get_cmap("gist_heat"))
38 soholasco2.name = 'SOHO LASCO C2'
39 soholasco3 = deepcopy(mplcm.get_cmap("bone"))
40 soholasco3.name = 'SOHO LASCO C3'
41
42 # These are the SSWIDL color tables.
43 sswidlsoholasco2 = ct.sswidl_lasco_color_table(2)
44 sswidlsoholasco3 = ct.sswidl_lasco_color_table(3)
45
46 stereocor1 = ct.cor_color_table(1)
47 stereocor2 = ct.cor_color_table(2)
48
49 stereohi1 = ct.stereo_hi_color_table(1)
50 stereohi2 = ct.stereo_hi_color_table(2)
51
52 yohkohsxtal = ct.sxt_color_table('al')
53 yohkohsxtwh = ct.sxt_color_table('wh')
54
55 hinodexrt = ct.xrt_color_table()
56 hinodesotintensity = ct.sot_color_table('intensity')
57
58 trace171 = ct.trace_color_table('171')
59 trace195 = ct.trace_color_table('195')
60 trace284 = ct.trace_color_table('284')
61 trace1216 = ct.trace_color_table('1216')
62 trace1550 = ct.trace_color_table('1550')
63 trace1600 = ct.trace_color_table('1600')
64 trace1700 = ct.trace_color_table('1700')
65 traceWL = ct.trace_color_table('WL')
66
67 hmimag = ct.hmi_mag_color_table()
68
69 cmlist = {
70 'sdoaia94': sdoaia94,
71 'sdoaia131': sdoaia131,
72 'sdoaia171': sdoaia171,
73 'sdoaia193': sdoaia193,
74 'sdoaia211': sdoaia211,
75 'sdoaia304': sdoaia304,
76 'sdoaia335': sdoaia335,
77 'sdoaia1600': sdoaia1600,
78 'sdoaia1700': sdoaia1700,
79 'sdoaia4500': sdoaia4500,
80 'sohoeit171': sohoeit171,
81 'sohoeit195': sohoeit195,
82 'sohoeit284': sohoeit284,
83 'sohoeit304': sohoeit304,
84 'soholasco2': soholasco2,
85 'soholasco3': soholasco3,
86 'sswidlsoholasco2': sswidlsoholasco2,
87 'sswidlsoholasco3': sswidlsoholasco3,
88 'stereocor1': stereocor1,
89 'stereocor2': stereocor2,
90 'stereohi1': stereohi1,
91 'stereohi2': stereohi2,
92 'rhessi': mplcm.jet,
93 'yohkohsxtal': yohkohsxtal,
94 'yohkohsxtwh': yohkohsxtwh,
95 'hinodexrt': hinodexrt,
96 'hinodesotintensity': hinodesotintensity,
97 'trace171': trace171,
98 'trace195': trace195,
99 'trace284': trace284,
100 'trace1216': trace1216,
101 'trace1550': trace1550,
102 'trace1600': trace1600,
103 'trace1700': trace1700,
104 'traceWL': traceWL,
105 'hmimag': hmimag,
106 'irissji1330': ct.iris_sji_color_table('1330'),
107 'irissji1400': ct.iris_sji_color_table('1400'),
108 'irissji1600': ct.iris_sji_color_table('1600'),
109 'irissji2796': ct.iris_sji_color_table('2796'),
110 'irissji2832': ct.iris_sji_color_table('2832'),
111 'irissji5000': ct.iris_sji_color_table('5000'),
112 'irissjiFUV': ct.iris_sji_color_table('FUV'),
113 'irissjiNUV': ct.iris_sji_color_table('NUV'),
114 'irissjiSJI_NUV': ct.iris_sji_color_table('SJI_NUV')
115 }
116
117 # Register the colormaps with matplotlib so plt.get_cmap('sdoaia171') works
118 for name, cmap in cmlist.items():
119 mplcm.register_cmap(name=name, cmap=cmap)
120
121
122 @deprecated("0.9",
123 "'sunpy.cm.get_cmap' is dprecated, use 'plt.get_cmap' from Matplotlib "
124 "to load the colormaps instead.",
125 alternative='plt.get_cmap')
126 def get_cmap(name):
127 """
128 Get a colormap.
129
130 Parameters
131 ----------
132 name : string
133 The name of a color map.
134
135 Returns
136 -------
137 value : matplotlib colormap
138
139 See Also
140 --------
141
142 Examples
143 --------
144 >>> import sunpy.cm as cm
145 >>> colormap = cm.get_cmap(name = 'sdoaia94')
146
147 References
148 ----------
149 | https://matplotlib.org/api/cm_api.html
150
151 """
152 if name in cmlist:
153 return cmlist.get(name)
154 else:
155 raise ValueError("Colormap {name!s} is not recognized".format(name=name))
156
157
158 def show_colormaps(search=None):
159 """Displays a plot of the custom color maps supported in SunPy.
160
161 Parameters
162 ----------
163 search : str
164 A string to search for in the names of the color maps (e.g. aia, EIT,
165 171). Case insensitive.
166
167 Returns
168 -------
169 None : none
170
171 Examples
172 --------
173 >>> import sunpy.cm as cm
174 >>> cm.show_colormaps()
175 >>> cm.show_colormaps(search='aia')
176 >>> cm.show_colormaps(search='171')
177
178 References
179 ----------
180
181 """
182
183 if search is not None:
184 maps = sorted({k: v for (k, v) in cmlist.items() if k.lower().count(search.lower())})
185 if len(maps) == 0:
186 raise KeyError('No color maps found for search term "{:s}"'.format(search))
187 else:
188 maps = sorted(cmlist)
189
190 nmaps = len(maps) + 1
191
192 a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103
193 a = np.vstack((a, a))
194
195 fig = plt.figure(figsize=(5, 10),dpi=64)
196 fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
197 for i, name in enumerate(maps):
198 ax = plt.subplot(nmaps, 1, i + 1)
199 plt.axis("off")
200 plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')
201 pos = list(ax.get_position().bounds)
202 fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,
203 horizontalalignment='right')
204 plt.show()
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/cm/cm.py b/sunpy/cm/cm.py
--- a/sunpy/cm/cm.py
+++ b/sunpy/cm/cm.py
@@ -192,7 +192,7 @@
a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103
a = np.vstack((a, a))
- fig = plt.figure(figsize=(5, 10),dpi=64)
+ fig = plt.figure(figsize=(7, 10),dpi=128)
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
for i, name in enumerate(maps):
ax = plt.subplot(nmaps, 1, i + 1)
| {"golden_diff": "diff --git a/sunpy/cm/cm.py b/sunpy/cm/cm.py\n--- a/sunpy/cm/cm.py\n+++ b/sunpy/cm/cm.py\n@@ -192,7 +192,7 @@\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n \n- fig = plt.figure(figsize=(5, 10),dpi=64)\n+ fig = plt.figure(figsize=(7, 10),dpi=128)\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n", "issue": "show_colormaps() is not big enough to show the colormap names\nSee attached picture. This affects the gallery too - see http://docs.sunpy.org/en/stable/generated/gallery/plotting/sunpy_colormaps_reference.html#sphx-glr-generated-gallery-plotting-sunpy-colormaps-reference-py\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis module provides a set of colormaps specific for solar data.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mplcm\n\nfrom sunpy.cm import color_tables as ct\nfrom sunpy.util import deprecated\n\n__all__ = ['get_cmap', 'show_colormaps', 'cmlist']\n\nsdoaia94 = ct.aia_color_table(94)\nsdoaia131 = ct.aia_color_table(131)\nsdoaia171 = ct.aia_color_table(171)\nsdoaia193 = ct.aia_color_table(193)\nsdoaia211 = ct.aia_color_table(211)\nsdoaia304 = ct.aia_color_table(304)\nsdoaia335 = ct.aia_color_table(335)\nsdoaia1600 = ct.aia_color_table(1600)\nsdoaia1700 = ct.aia_color_table(1700)\nsdoaia4500 = ct.aia_color_table(4500)\n\nsohoeit171 = ct.eit_color_table(171)\nsohoeit195 = ct.eit_color_table(195)\nsohoeit284 = ct.eit_color_table(284)\nsohoeit304 = ct.eit_color_table(304)\n\n# The color tables below returns one of the fundamental color tables for SOHO\n# LASCO images. These are not the same as those used in SSWIDL. 
This is\n# because the SSWIDL color scaling for LASCO level 0.5 and 1.0 is highly\n# compressed and does not display the data well.\nsoholasco2 = deepcopy(mplcm.get_cmap(\"gist_heat\"))\nsoholasco2.name = 'SOHO LASCO C2'\nsoholasco3 = deepcopy(mplcm.get_cmap(\"bone\"))\nsoholasco3.name = 'SOHO LASCO C3'\n\n# These are the SSWIDL color tables.\nsswidlsoholasco2 = ct.sswidl_lasco_color_table(2)\nsswidlsoholasco3 = ct.sswidl_lasco_color_table(3)\n\nstereocor1 = ct.cor_color_table(1)\nstereocor2 = ct.cor_color_table(2)\n\nstereohi1 = ct.stereo_hi_color_table(1)\nstereohi2 = ct.stereo_hi_color_table(2)\n\nyohkohsxtal = ct.sxt_color_table('al')\nyohkohsxtwh = ct.sxt_color_table('wh')\n\nhinodexrt = ct.xrt_color_table()\nhinodesotintensity = ct.sot_color_table('intensity')\n\ntrace171 = ct.trace_color_table('171')\ntrace195 = ct.trace_color_table('195')\ntrace284 = ct.trace_color_table('284')\ntrace1216 = ct.trace_color_table('1216')\ntrace1550 = ct.trace_color_table('1550')\ntrace1600 = ct.trace_color_table('1600')\ntrace1700 = ct.trace_color_table('1700')\ntraceWL = ct.trace_color_table('WL')\n\nhmimag = ct.hmi_mag_color_table()\n\ncmlist = {\n 'sdoaia94': sdoaia94,\n 'sdoaia131': sdoaia131,\n 'sdoaia171': sdoaia171,\n 'sdoaia193': sdoaia193,\n 'sdoaia211': sdoaia211,\n 'sdoaia304': sdoaia304,\n 'sdoaia335': sdoaia335,\n 'sdoaia1600': sdoaia1600,\n 'sdoaia1700': sdoaia1700,\n 'sdoaia4500': sdoaia4500,\n 'sohoeit171': sohoeit171,\n 'sohoeit195': sohoeit195,\n 'sohoeit284': sohoeit284,\n 'sohoeit304': sohoeit304,\n 'soholasco2': soholasco2,\n 'soholasco3': soholasco3,\n 'sswidlsoholasco2': sswidlsoholasco2,\n 'sswidlsoholasco3': sswidlsoholasco3,\n 'stereocor1': stereocor1,\n 'stereocor2': stereocor2,\n 'stereohi1': stereohi1,\n 'stereohi2': stereohi2,\n 'rhessi': mplcm.jet,\n 'yohkohsxtal': yohkohsxtal,\n 'yohkohsxtwh': yohkohsxtwh,\n 'hinodexrt': hinodexrt,\n 'hinodesotintensity': hinodesotintensity,\n 'trace171': trace171,\n 'trace195': trace195,\n 'trace284': trace284,\n 'trace1216': trace1216,\n 'trace1550': trace1550,\n 'trace1600': trace1600,\n 'trace1700': trace1700,\n 'traceWL': traceWL,\n 'hmimag': hmimag,\n 'irissji1330': ct.iris_sji_color_table('1330'),\n 'irissji1400': ct.iris_sji_color_table('1400'),\n 'irissji1600': ct.iris_sji_color_table('1600'),\n 'irissji2796': ct.iris_sji_color_table('2796'),\n 'irissji2832': ct.iris_sji_color_table('2832'),\n 'irissji5000': ct.iris_sji_color_table('5000'),\n 'irissjiFUV': ct.iris_sji_color_table('FUV'),\n 'irissjiNUV': ct.iris_sji_color_table('NUV'),\n 'irissjiSJI_NUV': ct.iris_sji_color_table('SJI_NUV')\n}\n\n# Register the colormaps with matplotlib so plt.get_cmap('sdoaia171') works\nfor name, cmap in cmlist.items():\n mplcm.register_cmap(name=name, cmap=cmap)\n\n\n@deprecated(\"0.9\",\n \"'sunpy.cm.get_cmap' is dprecated, use 'plt.get_cmap' from Matplotlib \"\n \"to load the colormaps instead.\",\n alternative='plt.get_cmap')\ndef get_cmap(name):\n \"\"\"\n Get a colormap.\n\n Parameters\n ----------\n name : string\n The name of a color map.\n\n Returns\n -------\n value : matplotlib colormap\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> colormap = cm.get_cmap(name = 'sdoaia94')\n\n References\n ----------\n | https://matplotlib.org/api/cm_api.html\n\n \"\"\"\n if name in cmlist:\n return cmlist.get(name)\n else:\n raise ValueError(\"Colormap {name!s} is not recognized\".format(name=name))\n\n\ndef show_colormaps(search=None):\n \"\"\"Displays a plot of the custom color maps supported in SunPy.\n\n 
Parameters\n ----------\n search : str\n A string to search for in the names of the color maps (e.g. aia, EIT,\n 171). Case insensitive.\n\n Returns\n -------\n None : none\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> cm.show_colormaps()\n >>> cm.show_colormaps(search='aia')\n >>> cm.show_colormaps(search='171')\n\n References\n ----------\n\n \"\"\"\n\n if search is not None:\n maps = sorted({k: v for (k, v) in cmlist.items() if k.lower().count(search.lower())})\n if len(maps) == 0:\n raise KeyError('No color maps found for search term \"{:s}\"'.format(search))\n else:\n maps = sorted(cmlist)\n\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10),dpi=64)\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n plt.show()\n", "path": "sunpy/cm/cm.py"}], "after_files": [{"content": "\"\"\"\nThis module provides a set of colormaps specific for solar data.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mplcm\n\nfrom sunpy.cm import color_tables as ct\nfrom sunpy.util import deprecated\n\n__all__ = ['get_cmap', 'show_colormaps', 'cmlist']\n\nsdoaia94 = ct.aia_color_table(94)\nsdoaia131 = ct.aia_color_table(131)\nsdoaia171 = ct.aia_color_table(171)\nsdoaia193 = ct.aia_color_table(193)\nsdoaia211 = ct.aia_color_table(211)\nsdoaia304 = ct.aia_color_table(304)\nsdoaia335 = ct.aia_color_table(335)\nsdoaia1600 = ct.aia_color_table(1600)\nsdoaia1700 = ct.aia_color_table(1700)\nsdoaia4500 = ct.aia_color_table(4500)\n\nsohoeit171 = ct.eit_color_table(171)\nsohoeit195 = ct.eit_color_table(195)\nsohoeit284 = ct.eit_color_table(284)\nsohoeit304 = ct.eit_color_table(304)\n\n# The color tables below returns one of the fundamental color tables for SOHO\n# LASCO images. These are not the same as those used in SSWIDL. 
This is\n# because the SSWIDL color scaling for LASCO level 0.5 and 1.0 is highly\n# compressed and does not display the data well.\nsoholasco2 = deepcopy(mplcm.get_cmap(\"gist_heat\"))\nsoholasco2.name = 'SOHO LASCO C2'\nsoholasco3 = deepcopy(mplcm.get_cmap(\"bone\"))\nsoholasco3.name = 'SOHO LASCO C3'\n\n# These are the SSWIDL color tables.\nsswidlsoholasco2 = ct.sswidl_lasco_color_table(2)\nsswidlsoholasco3 = ct.sswidl_lasco_color_table(3)\n\nstereocor1 = ct.cor_color_table(1)\nstereocor2 = ct.cor_color_table(2)\n\nstereohi1 = ct.stereo_hi_color_table(1)\nstereohi2 = ct.stereo_hi_color_table(2)\n\nyohkohsxtal = ct.sxt_color_table('al')\nyohkohsxtwh = ct.sxt_color_table('wh')\n\nhinodexrt = ct.xrt_color_table()\nhinodesotintensity = ct.sot_color_table('intensity')\n\ntrace171 = ct.trace_color_table('171')\ntrace195 = ct.trace_color_table('195')\ntrace284 = ct.trace_color_table('284')\ntrace1216 = ct.trace_color_table('1216')\ntrace1550 = ct.trace_color_table('1550')\ntrace1600 = ct.trace_color_table('1600')\ntrace1700 = ct.trace_color_table('1700')\ntraceWL = ct.trace_color_table('WL')\n\nhmimag = ct.hmi_mag_color_table()\n\ncmlist = {\n 'sdoaia94': sdoaia94,\n 'sdoaia131': sdoaia131,\n 'sdoaia171': sdoaia171,\n 'sdoaia193': sdoaia193,\n 'sdoaia211': sdoaia211,\n 'sdoaia304': sdoaia304,\n 'sdoaia335': sdoaia335,\n 'sdoaia1600': sdoaia1600,\n 'sdoaia1700': sdoaia1700,\n 'sdoaia4500': sdoaia4500,\n 'sohoeit171': sohoeit171,\n 'sohoeit195': sohoeit195,\n 'sohoeit284': sohoeit284,\n 'sohoeit304': sohoeit304,\n 'soholasco2': soholasco2,\n 'soholasco3': soholasco3,\n 'sswidlsoholasco2': sswidlsoholasco2,\n 'sswidlsoholasco3': sswidlsoholasco3,\n 'stereocor1': stereocor1,\n 'stereocor2': stereocor2,\n 'stereohi1': stereohi1,\n 'stereohi2': stereohi2,\n 'rhessi': mplcm.jet,\n 'yohkohsxtal': yohkohsxtal,\n 'yohkohsxtwh': yohkohsxtwh,\n 'hinodexrt': hinodexrt,\n 'hinodesotintensity': hinodesotintensity,\n 'trace171': trace171,\n 'trace195': trace195,\n 'trace284': trace284,\n 'trace1216': trace1216,\n 'trace1550': trace1550,\n 'trace1600': trace1600,\n 'trace1700': trace1700,\n 'traceWL': traceWL,\n 'hmimag': hmimag,\n 'irissji1330': ct.iris_sji_color_table('1330'),\n 'irissji1400': ct.iris_sji_color_table('1400'),\n 'irissji1600': ct.iris_sji_color_table('1600'),\n 'irissji2796': ct.iris_sji_color_table('2796'),\n 'irissji2832': ct.iris_sji_color_table('2832'),\n 'irissji5000': ct.iris_sji_color_table('5000'),\n 'irissjiFUV': ct.iris_sji_color_table('FUV'),\n 'irissjiNUV': ct.iris_sji_color_table('NUV'),\n 'irissjiSJI_NUV': ct.iris_sji_color_table('SJI_NUV')\n}\n\n# Register the colormaps with matplotlib so plt.get_cmap('sdoaia171') works\nfor name, cmap in cmlist.items():\n mplcm.register_cmap(name=name, cmap=cmap)\n\n\n@deprecated(\"0.9\",\n \"'sunpy.cm.get_cmap' is dprecated, use 'plt.get_cmap' from Matplotlib \"\n \"to load the colormaps instead.\",\n alternative='plt.get_cmap')\ndef get_cmap(name):\n \"\"\"\n Get a colormap.\n\n Parameters\n ----------\n name : string\n The name of a color map.\n\n Returns\n -------\n value : matplotlib colormap\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> colormap = cm.get_cmap(name = 'sdoaia94')\n\n References\n ----------\n | https://matplotlib.org/api/cm_api.html\n\n \"\"\"\n if name in cmlist:\n return cmlist.get(name)\n else:\n raise ValueError(\"Colormap {name!s} is not recognized\".format(name=name))\n\n\ndef show_colormaps(search=None):\n \"\"\"Displays a plot of the custom color maps supported in SunPy.\n\n 
Parameters\n ----------\n search : str\n A string to search for in the names of the color maps (e.g. aia, EIT,\n 171). Case insensitive.\n\n Returns\n -------\n None : none\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> cm.show_colormaps()\n >>> cm.show_colormaps(search='aia')\n >>> cm.show_colormaps(search='171')\n\n References\n ----------\n\n \"\"\"\n\n if search is not None:\n maps = sorted({k: v for (k, v) in cmlist.items() if k.lower().count(search.lower())})\n if len(maps) == 0:\n raise KeyError('No color maps found for search term \"{:s}\"'.format(search))\n else:\n maps = sorted(cmlist)\n\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(7, 10),dpi=128)\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n plt.show()\n", "path": "sunpy/cm/cm.py"}]} | 3,076 | 191 |
gh_patches_debug_13624 | rasdani/github-patches | git_diff | encode__httpx-1391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Path encoding broken for non-ASCII in WSGI mode
### Describe the bug
When using httpx to call into a WSGI application (my preferred way to test Flask apps nowadays), the web view receives URL-encoded paths.

I'm not sure where this really belongs (is it a bug in httpx or Flask!?), but I've been working around it for months, so I guess I'll make myself unpopular and try it here first. If it turns out to be Flask's fault, I'll head over and bother the Pallets folks.
### To reproduce
```python
from flask import Flask, request
import httpx
app = Flask(__name__)
@app.route('/<path>')
def hello_world(path):
return f"path: { path }, query: { request.args['a'] }, url: { request.url }"
if __name__ == "__main__":
with httpx.Client(app=app, base_url="http://testserver") as client:
resp = client.get("/ä", params={"a": "ä"})
print("httpx", resp.text)
with app.test_client() as client:
resp = client.get("/ä?a=%C3%A4")
print("flask", resp.get_data().decode("utf-8"))
```
### Expected behavior
```
httpx path: ä, query: ä, url: http://testserver/ä?a=ä
flask path: ä, query: ä, url: http://localhost/ä?a=ä
```
### Actual behavior
```
httpx path: %C3%A4, query: ä, url: http://testserver/%25C3%25A4?a=ä
flask path: ä, query: ä, url: http://localhost/ä?a=ä
```
NB:

- Query parameters seem to be handled just fine; only the path is affected.
- `%25` is the percent-encoding of a literal `%`, meaning the already-encoded path gets encoded a second time (see the sketch below).
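
A minimal sketch using only the standard library (not httpx code) of what that `%25` suggests is happening: the request target travels over the wire already percent-encoded, and per PEP 3333 a WSGI app expects `PATH_INFO` to hold the decoded path, so handing over the raw target makes the framework escape the `%` a second time.

```python
from urllib.parse import quote, unquote

raw_target = quote("/ä", safe="/")    # what actually goes on the wire
print(raw_target)                     # /%C3%A4
print(quote(raw_target, safe="/"))    # /%25C3%25A4  <- the double-encoded form seen above
print(unquote(raw_target))            # /ä           <- what PATH_INFO should carry
```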
### Environment
- OS: macOS
- Python version: 3.8.6
- HTTPX version: 0.16.1
- Async environment: n/a
- HTTP proxy: n/a
- Custom certificates: no
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_transports/wsgi.py`
Content:
```
1 import io
2 import itertools
3 import typing
4
5 import httpcore
6
7
8 def _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:
9 body = iter(body)
10 for chunk in body:
11 if chunk:
12 return itertools.chain([chunk], body)
13 return []
14
15
16 class WSGITransport(httpcore.SyncHTTPTransport):
17 """
18 A custom transport that handles sending requests directly to an WSGI app.
19 The simplest way to use this functionality is to use the `app` argument.
20
21 ```
22 client = httpx.Client(app=app)
23 ```
24
25 Alternatively, you can setup the transport instance explicitly.
26 This allows you to include any additional configuration arguments specific
27 to the WSGITransport class:
28
29 ```
30 transport = httpx.WSGITransport(
31 app=app,
32 script_name="/submount",
33 remote_addr="1.2.3.4"
34 )
35 client = httpx.Client(transport=transport)
36 ```
37
38 Arguments:
39
40 * `app` - The ASGI application.
41 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
42 should be raised. Default to `True`. Can be set to `False` for use cases
43 such as testing the content of a client 500 response.
44 * `script_name` - The root path on which the ASGI application should be mounted.
45 * `remote_addr` - A string indicating the client IP of incoming requests.
46 ```
47 """
48
49 def __init__(
50 self,
51 app: typing.Callable,
52 raise_app_exceptions: bool = True,
53 script_name: str = "",
54 remote_addr: str = "127.0.0.1",
55 ) -> None:
56 self.app = app
57 self.raise_app_exceptions = raise_app_exceptions
58 self.script_name = script_name
59 self.remote_addr = remote_addr
60
61 def request(
62 self,
63 method: bytes,
64 url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
65 headers: typing.List[typing.Tuple[bytes, bytes]] = None,
66 stream: httpcore.SyncByteStream = None,
67 ext: dict = None,
68 ) -> typing.Tuple[
69 int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict
70 ]:
71 headers = [] if headers is None else headers
72 stream = httpcore.PlainByteStream(content=b"") if stream is None else stream
73
74 scheme, host, port, full_path = url
75 path, _, query = full_path.partition(b"?")
76 environ = {
77 "wsgi.version": (1, 0),
78 "wsgi.url_scheme": scheme.decode("ascii"),
79 "wsgi.input": io.BytesIO(b"".join(stream)),
80 "wsgi.errors": io.BytesIO(),
81 "wsgi.multithread": True,
82 "wsgi.multiprocess": False,
83 "wsgi.run_once": False,
84 "REQUEST_METHOD": method.decode(),
85 "SCRIPT_NAME": self.script_name,
86 "PATH_INFO": path.decode("ascii"),
87 "QUERY_STRING": query.decode("ascii"),
88 "SERVER_NAME": host.decode("ascii"),
89 "SERVER_PORT": str(port),
90 "REMOTE_ADDR": self.remote_addr,
91 }
92 for header_key, header_value in headers:
93 key = header_key.decode("ascii").upper().replace("-", "_")
94 if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
95 key = "HTTP_" + key
96 environ[key] = header_value.decode("ascii")
97
98 seen_status = None
99 seen_response_headers = None
100 seen_exc_info = None
101
102 def start_response(
103 status: str, response_headers: list, exc_info: typing.Any = None
104 ) -> None:
105 nonlocal seen_status, seen_response_headers, seen_exc_info
106 seen_status = status
107 seen_response_headers = response_headers
108 seen_exc_info = exc_info
109
110 result = self.app(environ, start_response)
111 # This is needed because the status returned by start_response
112 # shouldn't be used until the first non-empty chunk has been served.
113 result = _skip_leading_empty_chunks(result)
114
115 assert seen_status is not None
116 assert seen_response_headers is not None
117 if seen_exc_info and self.raise_app_exceptions:
118 raise seen_exc_info[1]
119
120 status_code = int(seen_status.split()[0])
121 headers = [
122 (key.encode("ascii"), value.encode("ascii"))
123 for key, value in seen_response_headers
124 ]
125 stream = httpcore.IteratorByteStream(iterator=result)
126 ext = {}
127
128 return (status_code, headers, stream, ext)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py
--- a/httpx/_transports/wsgi.py
+++ b/httpx/_transports/wsgi.py
@@ -1,6 +1,7 @@
import io
import itertools
import typing
+from urllib.parse import unquote
import httpcore
@@ -83,7 +84,7 @@
"wsgi.run_once": False,
"REQUEST_METHOD": method.decode(),
"SCRIPT_NAME": self.script_name,
- "PATH_INFO": path.decode("ascii"),
+ "PATH_INFO": unquote(path.decode("ascii")),
"QUERY_STRING": query.decode("ascii"),
"SERVER_NAME": host.decode("ascii"),
"SERVER_PORT": str(port),
| {"golden_diff": "diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py\n--- a/httpx/_transports/wsgi.py\n+++ b/httpx/_transports/wsgi.py\n@@ -1,6 +1,7 @@\n import io\n import itertools\n import typing\n+from urllib.parse import unquote\n \n import httpcore\n \n@@ -83,7 +84,7 @@\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n- \"PATH_INFO\": path.decode(\"ascii\"),\n+ \"PATH_INFO\": unquote(path.decode(\"ascii\")),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n", "issue": "Path encoding broken for non-ASCII in WSGI mode\n### Describe the bug\r\n\r\nWhen using httpx to call into a WSGI application (my preferred way to test Flask app nowadays), the web view receives urlencoded paths.\r\n\r\nI'm not sure where this really belongs to (is it a bug in httpx or Flask!?), but I've been working around it for months, so I guess I'll make myself unpopular and try it here first. If it turns out to be Flask's fault, I'l headl over and bother the Pallets folks.\r\n\r\n### To reproduce\r\n\r\n```python\r\nfrom flask import Flask, request\r\n\r\nimport httpx\r\n\r\n\r\napp = Flask(__name__)\r\n\r\[email protected]('/<path>')\r\ndef hello_world(path):\r\n return f\"path: { path }, query: { request.args['a'] }, url: { request.url }\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with httpx.Client(app=app, base_url=\"http://testserver\") as client:\r\n resp = client.get(\"/\u00e4\", params={\"a\": \"\u00e4\"})\r\n\r\n print(\"httpx\", resp.text)\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/\u00e4?a=%C3%A4\")\r\n\r\n print(\"flask\", resp.get_data().decode(\"utf-8\"))\r\n```\r\n\r\n### Expected behavior\r\n\r\n```\r\nhttpx path: \u00e4, query: \u00e4, url: http://testserver/\u00e4?a=\u00e4\r\nflask path: \u00e4, query: \u00e4, url: http://localhost/\u00e4?a=\u00e4\r\n```\r\n\r\n### Actual behavior\r\n\r\n```\r\nhttpx path: %C3%A4, query: \u00e4, url: http://testserver/%25C3%25A4?a=\u00e4\r\nflask path: \u00e4, query: \u00e4, url: http://localhost/\u00e4?a=\u00e4\r\n```\r\n\r\nNB\r\n\r\n- it seems to handle query parameters just fine.\r\n- `%25` is the ASCII code of the percent sign\r\n\r\n### Environment\r\n\r\n- OS: macOS\r\n- Python version: 3.8.6\r\n- HTTPX version: 0.16.1\r\n- Async environment: n/a\r\n- HTTP proxy: n/a\r\n- Custom certificates: no\r\n\n", "before_files": [{"content": "import io\nimport itertools\nimport typing\n\nimport httpcore\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGITransport(httpcore.SyncHTTPTransport):\n \"\"\"\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name=\"/submount\",\n remote_addr=\"1.2.3.4\"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. 
Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the ASGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: typing.Callable,\n raise_app_exceptions: bool = True,\n script_name: str = \"\",\n remote_addr: str = \"127.0.0.1\",\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n\n def request(\n self,\n method: bytes,\n url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],\n headers: typing.List[typing.Tuple[bytes, bytes]] = None,\n stream: httpcore.SyncByteStream = None,\n ext: dict = None,\n ) -> typing.Tuple[\n int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict\n ]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n \"wsgi.input\": io.BytesIO(b\"\".join(stream)),\n \"wsgi.errors\": io.BytesIO(),\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n \"PATH_INFO\": path.decode(\"ascii\"),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n \"REMOTE_ADDR\": self.remote_addr,\n }\n for header_key, header_value in headers:\n key = header_key.decode(\"ascii\").upper().replace(\"-\", \"_\")\n if key not in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n key = \"HTTP_\" + key\n environ[key] = header_value.decode(\"ascii\")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str, response_headers: list, exc_info: typing.Any = None\n ) -> None:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n\n result = self.app(environ, start_response)\n # This is needed because the status returned by start_response\n # shouldn't be used until the first non-empty chunk has been served.\n result = _skip_leading_empty_chunks(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode(\"ascii\"), value.encode(\"ascii\"))\n for key, value in seen_response_headers\n ]\n stream = httpcore.IteratorByteStream(iterator=result)\n ext = {}\n\n return (status_code, headers, stream, ext)\n", "path": "httpx/_transports/wsgi.py"}], "after_files": [{"content": "import io\nimport itertools\nimport typing\nfrom urllib.parse import unquote\n\nimport httpcore\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGITransport(httpcore.SyncHTTPTransport):\n \"\"\"\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance 
explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name=\"/submount\",\n remote_addr=\"1.2.3.4\"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the ASGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: typing.Callable,\n raise_app_exceptions: bool = True,\n script_name: str = \"\",\n remote_addr: str = \"127.0.0.1\",\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n\n def request(\n self,\n method: bytes,\n url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],\n headers: typing.List[typing.Tuple[bytes, bytes]] = None,\n stream: httpcore.SyncByteStream = None,\n ext: dict = None,\n ) -> typing.Tuple[\n int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict\n ]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n \"wsgi.input\": io.BytesIO(b\"\".join(stream)),\n \"wsgi.errors\": io.BytesIO(),\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n \"PATH_INFO\": unquote(path.decode(\"ascii\")),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n \"REMOTE_ADDR\": self.remote_addr,\n }\n for header_key, header_value in headers:\n key = header_key.decode(\"ascii\").upper().replace(\"-\", \"_\")\n if key not in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n key = \"HTTP_\" + key\n environ[key] = header_value.decode(\"ascii\")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str, response_headers: list, exc_info: typing.Any = None\n ) -> None:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n\n result = self.app(environ, start_response)\n # This is needed because the status returned by start_response\n # shouldn't be used until the first non-empty chunk has been served.\n result = _skip_leading_empty_chunks(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode(\"ascii\"), value.encode(\"ascii\"))\n for key, value in seen_response_headers\n ]\n stream = httpcore.IteratorByteStream(iterator=result)\n ext = {}\n\n return (status_code, headers, stream, ext)\n", "path": "httpx/_transports/wsgi.py"}]} | 2,017 | 165 |
gh_patches_debug_14076 | rasdani/github-patches | git_diff | google__clusterfuzz-1726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
in local development, zip uploads are broken
```
| File "/build/clusterfuzz/src/appengine/handlers/jobs.py", line 132, in post
| blob_info = self.get_upload()
| File "/build/clusterfuzz/src/appengine/handlers/base_handler.py", line 317, in get_upload
| blob_info = storage.GcsBlobInfo.from_key(upload_key)
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 556, in from_key
| logs.log_error('Failed to get blob from key %s.' % key)
| LogError: Failed to get blob from key 0e3179ad-31b4-4ba5-a45c-86a610e065c7.
| Traceback (most recent call last):
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 554, in from_key
| return GcsBlobInfo(blobs_bucket(), key)
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 536, in __init__
| self.filename = gcs_object['metadata'].get(BLOB_FILENAME_METADATA_KEY)
| TypeError: 'NoneType' object is not subscriptable
```
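
For illustration only (this is not the project's fix, which instead creates the missing local blobs bucket, as the patch below shows): the traceback ends in a `TypeError` because the metadata lookup for the uploaded blob returns `None` when the local GCS emulator has no such bucket or object, and the code then subscripts that `None`. A hypothetical defensive lookup, with `'filename'` standing in for the real metadata key, would look like this:

```python
# Stand-in for the storage lookup that returns None for a missing bucket/object.
gcs_object = None

# Guarded access: yields None instead of raising
# "TypeError: 'NoneType' object is not subscriptable".
metadata = (gcs_object or {}).get('metadata') or {}
filename = metadata.get('filename')
print(filename)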
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/local/butler/run_server.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """run_server.py run the Clusterfuzz server locally."""
15 from __future__ import print_function
16 from future import standard_library
17 standard_library.install_aliases()
18 import os
19 import shutil
20 import threading
21 import time
22 import urllib.request
23
24 from local.butler import appengine
25 from local.butler import common
26 from local.butler import constants
27 from python.config import local_config
28 from python.tests.test_libs import test_utils
29
30
31 def bootstrap_db():
32 """Bootstrap the DB."""
33
34 def bootstrap():
35 # Wait for the server to run.
36 time.sleep(10)
37 print('Bootstrapping datastore...')
38 common.execute(
39 ('python butler.py run setup '
40 '--non-dry-run --local --config-dir={config_dir}'
41 ).format(config_dir=constants.TEST_CONFIG_DIR),
42 exit_on_error=False)
43
44 thread = threading.Thread(target=bootstrap)
45 thread.start()
46
47
48 def create_local_bucket(local_gcs_buckets_path, name):
49 """Create a local bucket."""
50 blobs_bucket = os.path.join(local_gcs_buckets_path, name)
51 if not os.path.exists(blobs_bucket):
52 os.mkdir(blobs_bucket)
53
54
55 def bootstrap_gcs(storage_path):
56 """Bootstrap GCS."""
57 local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')
58 if not os.path.exists(local_gcs_buckets_path):
59 os.mkdir(local_gcs_buckets_path)
60
61 config = local_config.ProjectConfig()
62 create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
63 create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))
64 create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))
65 create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))
66 create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket'))
67 create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET'))
68 create_local_bucket(local_gcs_buckets_path,
69 config.get('env.QUARANTINE_BUCKET'))
70 create_local_bucket(local_gcs_buckets_path,
71 config.get('env.SHARED_CORPUS_BUCKET'))
72 create_local_bucket(local_gcs_buckets_path,
73 config.get('env.FUZZ_LOGS_BUCKET'))
74 create_local_bucket(local_gcs_buckets_path,
75 config.get('env.MUTATOR_PLUGINS_BUCKET'))
76
77 # Symlink local GCS bucket path to appengine src dir to bypass sandboxing
78 # issues.
79 common.symlink(
80 src=local_gcs_buckets_path,
81 target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))
82
83
84 def start_cron_threads():
85 """Start threads to trigger essential cron jobs."""
86
87 request_timeout = 10 * 60 # 10 minutes.
88
89 def trigger(interval_seconds, target):
90 """Trigger a cron job."""
91 while True:
92 time.sleep(interval_seconds)
93
94 try:
95 url = 'http://{host}/{target}'.format(
96 host=constants.CRON_SERVICE_HOST, target=target)
97 request = urllib.request.Request(url)
98 request.add_header('X-Appengine-Cron', 'true')
99 response = urllib.request.urlopen(request, timeout=request_timeout)
100 response.read(60) # wait for request to finish.
101 except Exception:
102 continue
103
104 crons = (
105 (90, 'cleanup'),
106 (60, 'triage'),
107 (6 * 3600, 'schedule-progression-tasks'),
108 (12 * 3600, 'schedule-corpus-pruning'),
109 )
110
111 for interval, cron in crons:
112 thread = threading.Thread(target=trigger, args=(interval, cron))
113 thread.daemon = True
114 thread.start()
115
116
117 def execute(args):
118 """Run the server."""
119 os.environ['LOCAL_DEVELOPMENT'] = 'True'
120 common.kill_leftover_emulators()
121
122 if not args.skip_install_deps:
123 common.install_dependencies()
124
125 # Do this everytime as a past deployment might have changed these.
126 appengine.symlink_dirs()
127
128 # Deploy all yaml files from test project for basic appengine deployment and
129 # local testing to work. This needs to be called on every iteration as a past
130 # deployment might have overwritten or deleted these config files.
131 yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3')
132 appengine.copy_yamls_and_preprocess(yaml_paths)
133
134 # Build templates.
135 appengine.build_templates()
136
137 # Clean storage directory if needed.
138 if args.bootstrap or args.clean:
139 if os.path.exists(args.storage_path):
140 print('Clearing local datastore by removing %s.' % args.storage_path)
141 shutil.rmtree(args.storage_path)
142 if not os.path.exists(args.storage_path):
143 os.makedirs(args.storage_path)
144
145 # Set up local GCS buckets and symlinks.
146 bootstrap_gcs(args.storage_path)
147
148 # Start pubsub emulator.
149 pubsub_emulator = test_utils.start_cloud_emulator(
150 'pubsub',
151 args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],
152 data_dir=args.storage_path)
153 test_utils.setup_pubsub(constants.TEST_APP_ID)
154
155 # Start Datastore emulator
156 datastore_emulator = test_utils.start_cloud_emulator(
157 'datastore',
158 args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],
159 data_dir=args.storage_path,
160 store_on_disk=True)
161
162 # Start our custom GCS emulator.
163 local_gcs = common.execute_async(
164 'go run emulators/gcs.go -storage-path=' + args.storage_path, cwd='local')
165
166 if args.bootstrap:
167 bootstrap_db()
168
169 start_cron_threads()
170
171 os.environ['APPLICATION_ID'] = constants.TEST_APP_ID
172 os.environ['LOCAL_DEVELOPMENT'] = 'True'
173 os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'
174 os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST
175 os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST
176 os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST
177 os.environ['GAE_ENV'] = 'dev'
178 try:
179 cron_server = common.execute_async(
180 'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),
181 cwd=os.path.join('src', 'appengine'))
182
183 common.execute(
184 'gunicorn -b :{port} main:app'.format(
185 port=constants.DEV_APPSERVER_PORT),
186 cwd=os.path.join('src', 'appengine'))
187 except KeyboardInterrupt:
188 print('Server has been stopped. Exit.')
189 cron_server.terminate()
190 datastore_emulator.cleanup()
191 pubsub_emulator.cleanup()
192 local_gcs.terminate()
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/local/butler/run_server.py b/src/local/butler/run_server.py
--- a/src/local/butler/run_server.py
+++ b/src/local/butler/run_server.py
@@ -59,7 +59,12 @@
os.mkdir(local_gcs_buckets_path)
config = local_config.ProjectConfig()
- create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
+ test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET')
+ if test_blobs_bucket:
+ create_local_bucket(local_gcs_buckets_path, test_blobs_bucket)
+ else:
+ create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
+
create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))
create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))
create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))
| {"golden_diff": "diff --git a/src/local/butler/run_server.py b/src/local/butler/run_server.py\n--- a/src/local/butler/run_server.py\n+++ b/src/local/butler/run_server.py\n@@ -59,7 +59,12 @@\n os.mkdir(local_gcs_buckets_path)\n \n config = local_config.ProjectConfig()\n- create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n+ test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET')\n+ if test_blobs_bucket:\n+ create_local_bucket(local_gcs_buckets_path, test_blobs_bucket)\n+ else:\n+ create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n+\n create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))\n", "issue": "in local development, zip uploads are broken\n```\r\n| File \"/build/clusterfuzz/src/appengine/handlers/jobs.py\", line 132, in post\r\n| blob_info = self.get_upload()\r\n| File \"/build/clusterfuzz/src/appengine/handlers/base_handler.py\", line 317, in get_upload\r\n| blob_info = storage.GcsBlobInfo.from_key(upload_key)\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 556, in from_key\r\n| logs.log_error('Failed to get blob from key %s.' % key)\r\n| LogError: Failed to get blob from key 0e3179ad-31b4-4ba5-a45c-86a610e065c7.\r\n| Traceback (most recent call last):\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 554, in from_key\r\n| return GcsBlobInfo(blobs_bucket(), key)\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 536, in __init__\r\n| self.filename = gcs_object['metadata'].get(BLOB_FILENAME_METADATA_KEY)\r\n| TypeError: 'NoneType' object is not subscriptable\r\n```\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"run_server.py run the Clusterfuzz server locally.\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nimport os\nimport shutil\nimport threading\nimport time\nimport urllib.request\n\nfrom local.butler import appengine\nfrom local.butler import common\nfrom local.butler import constants\nfrom python.config import local_config\nfrom python.tests.test_libs import test_utils\n\n\ndef bootstrap_db():\n \"\"\"Bootstrap the DB.\"\"\"\n\n def bootstrap():\n # Wait for the server to run.\n time.sleep(10)\n print('Bootstrapping datastore...')\n common.execute(\n ('python butler.py run setup '\n '--non-dry-run --local --config-dir={config_dir}'\n ).format(config_dir=constants.TEST_CONFIG_DIR),\n exit_on_error=False)\n\n thread = threading.Thread(target=bootstrap)\n thread.start()\n\n\ndef create_local_bucket(local_gcs_buckets_path, name):\n \"\"\"Create a local bucket.\"\"\"\n blobs_bucket = os.path.join(local_gcs_buckets_path, name)\n if not os.path.exists(blobs_bucket):\n os.mkdir(blobs_bucket)\n\n\ndef bootstrap_gcs(storage_path):\n 
\"\"\"Bootstrap GCS.\"\"\"\n local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')\n if not os.path.exists(local_gcs_buckets_path):\n os.mkdir(local_gcs_buckets_path)\n\n config = local_config.ProjectConfig()\n create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.QUARANTINE_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.SHARED_CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.FUZZ_LOGS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.MUTATOR_PLUGINS_BUCKET'))\n\n # Symlink local GCS bucket path to appengine src dir to bypass sandboxing\n # issues.\n common.symlink(\n src=local_gcs_buckets_path,\n target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))\n\n\ndef start_cron_threads():\n \"\"\"Start threads to trigger essential cron jobs.\"\"\"\n\n request_timeout = 10 * 60 # 10 minutes.\n\n def trigger(interval_seconds, target):\n \"\"\"Trigger a cron job.\"\"\"\n while True:\n time.sleep(interval_seconds)\n\n try:\n url = 'http://{host}/{target}'.format(\n host=constants.CRON_SERVICE_HOST, target=target)\n request = urllib.request.Request(url)\n request.add_header('X-Appengine-Cron', 'true')\n response = urllib.request.urlopen(request, timeout=request_timeout)\n response.read(60) # wait for request to finish.\n except Exception:\n continue\n\n crons = (\n (90, 'cleanup'),\n (60, 'triage'),\n (6 * 3600, 'schedule-progression-tasks'),\n (12 * 3600, 'schedule-corpus-pruning'),\n )\n\n for interval, cron in crons:\n thread = threading.Thread(target=trigger, args=(interval, cron))\n thread.daemon = True\n thread.start()\n\n\ndef execute(args):\n \"\"\"Run the server.\"\"\"\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n common.kill_leftover_emulators()\n\n if not args.skip_install_deps:\n common.install_dependencies()\n\n # Do this everytime as a past deployment might have changed these.\n appengine.symlink_dirs()\n\n # Deploy all yaml files from test project for basic appengine deployment and\n # local testing to work. This needs to be called on every iteration as a past\n # deployment might have overwritten or deleted these config files.\n yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3')\n appengine.copy_yamls_and_preprocess(yaml_paths)\n\n # Build templates.\n appengine.build_templates()\n\n # Clean storage directory if needed.\n if args.bootstrap or args.clean:\n if os.path.exists(args.storage_path):\n print('Clearing local datastore by removing %s.' 
% args.storage_path)\n shutil.rmtree(args.storage_path)\n if not os.path.exists(args.storage_path):\n os.makedirs(args.storage_path)\n\n # Set up local GCS buckets and symlinks.\n bootstrap_gcs(args.storage_path)\n\n # Start pubsub emulator.\n pubsub_emulator = test_utils.start_cloud_emulator(\n 'pubsub',\n args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],\n data_dir=args.storage_path)\n test_utils.setup_pubsub(constants.TEST_APP_ID)\n\n # Start Datastore emulator\n datastore_emulator = test_utils.start_cloud_emulator(\n 'datastore',\n args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],\n data_dir=args.storage_path,\n store_on_disk=True)\n\n # Start our custom GCS emulator.\n local_gcs = common.execute_async(\n 'go run emulators/gcs.go -storage-path=' + args.storage_path, cwd='local')\n\n if args.bootstrap:\n bootstrap_db()\n\n start_cron_threads()\n\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'\n os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['GAE_ENV'] = 'dev'\n try:\n cron_server = common.execute_async(\n 'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),\n cwd=os.path.join('src', 'appengine'))\n\n common.execute(\n 'gunicorn -b :{port} main:app'.format(\n port=constants.DEV_APPSERVER_PORT),\n cwd=os.path.join('src', 'appengine'))\n except KeyboardInterrupt:\n print('Server has been stopped. Exit.')\n cron_server.terminate()\n datastore_emulator.cleanup()\n pubsub_emulator.cleanup()\n local_gcs.terminate()\n", "path": "src/local/butler/run_server.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"run_server.py run the Clusterfuzz server locally.\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nimport os\nimport shutil\nimport threading\nimport time\nimport urllib.request\n\nfrom local.butler import appengine\nfrom local.butler import common\nfrom local.butler import constants\nfrom python.config import local_config\nfrom python.tests.test_libs import test_utils\n\n\ndef bootstrap_db():\n \"\"\"Bootstrap the DB.\"\"\"\n\n def bootstrap():\n # Wait for the server to run.\n time.sleep(10)\n print('Bootstrapping datastore...')\n common.execute(\n ('python butler.py run setup '\n '--non-dry-run --local --config-dir={config_dir}'\n ).format(config_dir=constants.TEST_CONFIG_DIR),\n exit_on_error=False)\n\n thread = threading.Thread(target=bootstrap)\n thread.start()\n\n\ndef create_local_bucket(local_gcs_buckets_path, name):\n \"\"\"Create a local bucket.\"\"\"\n blobs_bucket = os.path.join(local_gcs_buckets_path, name)\n if not os.path.exists(blobs_bucket):\n os.mkdir(blobs_bucket)\n\n\ndef bootstrap_gcs(storage_path):\n \"\"\"Bootstrap 
GCS.\"\"\"\n local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')\n if not os.path.exists(local_gcs_buckets_path):\n os.mkdir(local_gcs_buckets_path)\n\n config = local_config.ProjectConfig()\n test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET')\n if test_blobs_bucket:\n create_local_bucket(local_gcs_buckets_path, test_blobs_bucket)\n else:\n create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n\n create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.QUARANTINE_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.SHARED_CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.FUZZ_LOGS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.MUTATOR_PLUGINS_BUCKET'))\n\n # Symlink local GCS bucket path to appengine src dir to bypass sandboxing\n # issues.\n common.symlink(\n src=local_gcs_buckets_path,\n target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))\n\n\ndef start_cron_threads():\n \"\"\"Start threads to trigger essential cron jobs.\"\"\"\n\n request_timeout = 10 * 60 # 10 minutes.\n\n def trigger(interval_seconds, target):\n \"\"\"Trigger a cron job.\"\"\"\n while True:\n time.sleep(interval_seconds)\n\n try:\n url = 'http://{host}/{target}'.format(\n host=constants.CRON_SERVICE_HOST, target=target)\n request = urllib.request.Request(url)\n request.add_header('X-Appengine-Cron', 'true')\n response = urllib.request.urlopen(request, timeout=request_timeout)\n response.read(60) # wait for request to finish.\n except Exception:\n continue\n\n crons = (\n (90, 'cleanup'),\n (60, 'triage'),\n (6 * 3600, 'schedule-progression-tasks'),\n (12 * 3600, 'schedule-corpus-pruning'),\n )\n\n for interval, cron in crons:\n thread = threading.Thread(target=trigger, args=(interval, cron))\n thread.daemon = True\n thread.start()\n\n\ndef execute(args):\n \"\"\"Run the server.\"\"\"\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n common.kill_leftover_emulators()\n\n if not args.skip_install_deps:\n common.install_dependencies()\n\n # Do this everytime as a past deployment might have changed these.\n appengine.symlink_dirs()\n\n # Deploy all yaml files from test project for basic appengine deployment and\n # local testing to work. This needs to be called on every iteration as a past\n # deployment might have overwritten or deleted these config files.\n yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3')\n appengine.copy_yamls_and_preprocess(yaml_paths)\n\n # Build templates.\n appengine.build_templates()\n\n # Clean storage directory if needed.\n if args.bootstrap or args.clean:\n if os.path.exists(args.storage_path):\n print('Clearing local datastore by removing %s.' 
% args.storage_path)\n shutil.rmtree(args.storage_path)\n if not os.path.exists(args.storage_path):\n os.makedirs(args.storage_path)\n\n # Set up local GCS buckets and symlinks.\n bootstrap_gcs(args.storage_path)\n\n # Start pubsub emulator.\n pubsub_emulator = test_utils.start_cloud_emulator(\n 'pubsub',\n args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],\n data_dir=args.storage_path)\n test_utils.setup_pubsub(constants.TEST_APP_ID)\n\n # Start Datastore emulator\n datastore_emulator = test_utils.start_cloud_emulator(\n 'datastore',\n args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],\n data_dir=args.storage_path,\n store_on_disk=True)\n\n # Start our custom GCS emulator.\n local_gcs = common.execute_async(\n 'go run emulators/gcs.go -storage-path=' + args.storage_path, cwd='local')\n\n if args.bootstrap:\n bootstrap_db()\n\n start_cron_threads()\n\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'\n os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['GAE_ENV'] = 'dev'\n try:\n cron_server = common.execute_async(\n 'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),\n cwd=os.path.join('src', 'appengine'))\n\n common.execute(\n 'gunicorn -b :{port} main:app'.format(\n port=constants.DEV_APPSERVER_PORT),\n cwd=os.path.join('src', 'appengine'))\n except KeyboardInterrupt:\n print('Server has been stopped. Exit.')\n cron_server.terminate()\n datastore_emulator.cleanup()\n pubsub_emulator.cleanup()\n local_gcs.terminate()\n", "path": "src/local/butler/run_server.py"}]} | 2,610 | 206 |
gh_patches_debug_27208 | rasdani/github-patches | git_diff | scrapy__scrapy-4721 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Smarter generator check for combined yield/return statements: ignore nested functions
## Summary
Currently, if spider methods are generators that yield results and contain a nested function, then the following warning is issued:
```
[py.warnings] WARNING: /Library/Python/3.7/site-packages/scrapy/core/scraper.py:148: UserWarning: The "MySpider.parse" method is a generator and includes a "return" statement with a value different than None. This could lead to unexpected behaviour. Please see https://docs.python.org/3/reference/simple_stmts.html#the-return-statement for details about the semantics of the "return" statement within generators
warn_on_generator_with_return_value(spider, callback)
```
The example of a simple spider that results in the warning:
```
import scrapy
class MySpider(scrapy.Spider):
name = "MySpider"
start_urls = ["https://scrapy.org"]
def parse(self, response):
def is_external(url):
href = url.css('::attr(href)').get()
return href.startswith('http') and 'scrapy.org' not in href
links = [link for link in response.css('a') if is_external(link)]
for link in links:
yield {'link': link.css('::attr(href)').get(), 'text': link.css('::text').get()}
```
I know it's a somewhat artificial example, since the nested function could simply be moved out, but there is nothing wrong with nested functions conceptually.
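
A small, self-contained sketch (standard library only, not Scrapy code; the filter below is a simplified version of the real check) of why the warning presumably fires: `ast.walk` visits every node in the tree, including the bodies of functions nested inside the generator, so the helper's `return` gets attributed to the outer `parse`.

```python
import ast
from textwrap import dedent

src = dedent("""
    def parse(response):
        def is_external(url):
            return True        # return-with-value, but only in the nested helper
        yield {}
""")

tree = ast.parse(src)
returns = [node for node in ast.walk(tree)
           if isinstance(node, ast.Return) and node.value is not None]
print(len(returns))  # 1 -> the nested helper's return is counted against parse()
```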
## Motivation
I have a midsize spider function that includes some nested helper functions that I'd like to keep close to where they are called.
## Describe alternatives you've considered
Moving the nested function out of the generator is an easy fix, but it constrains the expressivity of the code.
## Additional context
Related function: is_generator_with_return_value
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/misc.py`
Content:
```
1 """Helper functions which don't fit anywhere else"""
2 import ast
3 import inspect
4 import os
5 import re
6 import hashlib
7 import warnings
8 from contextlib import contextmanager
9 from importlib import import_module
10 from pkgutil import iter_modules
11 from textwrap import dedent
12
13 from w3lib.html import replace_entities
14
15 from scrapy.utils.datatypes import LocalWeakReferencedCache
16 from scrapy.utils.python import flatten, to_unicode
17 from scrapy.item import _BaseItem
18 from scrapy.utils.deprecate import ScrapyDeprecationWarning
19
20
21 _ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes
22
23
24 def arg_to_iter(arg):
25 """Convert an argument to an iterable. The argument can be a None, single
26 value, or an iterable.
27
28 Exception: if arg is a dict, [arg] will be returned
29 """
30 if arg is None:
31 return []
32 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
33 return arg
34 else:
35 return [arg]
36
37
38 def load_object(path):
39 """Load an object given its absolute object path, and return it.
40
41 object can be the import path of a class, function, variable or an
42 instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
43 """
44
45 try:
46 dot = path.rindex('.')
47 except ValueError:
48 raise ValueError("Error loading object '%s': not a full path" % path)
49
50 module, name = path[:dot], path[dot + 1:]
51 mod = import_module(module)
52
53 try:
54 obj = getattr(mod, name)
55 except AttributeError:
56 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
57
58 return obj
59
60
61 def walk_modules(path):
62 """Loads a module and all its submodules from the given module path and
63 returns them. If *any* module throws an exception while importing, that
64 exception is thrown back.
65
66 For example: walk_modules('scrapy.utils')
67 """
68
69 mods = []
70 mod = import_module(path)
71 mods.append(mod)
72 if hasattr(mod, '__path__'):
73 for _, subpath, ispkg in iter_modules(mod.__path__):
74 fullpath = path + '.' + subpath
75 if ispkg:
76 mods += walk_modules(fullpath)
77 else:
78 submod = import_module(fullpath)
79 mods.append(submod)
80 return mods
81
82
83 def extract_regex(regex, text, encoding='utf-8'):
84 """Extract a list of unicode strings from the given text/encoding using the following policies:
85
86 * if the regex contains a named group called "extract" that will be returned
87 * if the regex contains multiple numbered groups, all those will be returned (flattened)
88 * if the regex doesn't contain any group the entire regex matching is returned
89 """
90 warnings.warn(
91 "scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.",
92 ScrapyDeprecationWarning,
93 stacklevel=2
94 )
95
96 if isinstance(regex, str):
97 regex = re.compile(regex, re.UNICODE)
98
99 try:
100 strings = [regex.search(text).group('extract')] # named group
101 except Exception:
102 strings = regex.findall(text) # full regex or numbered groups
103 strings = flatten(strings)
104
105 if isinstance(text, str):
106 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
107 else:
108 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
109 for s in strings]
110
111
112 def md5sum(file):
113 """Calculate the md5 checksum of a file-like object without reading its
114 whole content in memory.
115
116 >>> from io import BytesIO
117 >>> md5sum(BytesIO(b'file content to hash'))
118 '784406af91dd5a54fbb9c84c2236595a'
119 """
120 m = hashlib.md5()
121 while True:
122 d = file.read(8096)
123 if not d:
124 break
125 m.update(d)
126 return m.hexdigest()
127
128
129 def rel_has_nofollow(rel):
130 """Return True if link rel attribute has nofollow type"""
131 return rel is not None and 'nofollow' in rel.split()
132
133
134 def create_instance(objcls, settings, crawler, *args, **kwargs):
135 """Construct a class instance using its ``from_crawler`` or
136 ``from_settings`` constructors, if available.
137
138 At least one of ``settings`` and ``crawler`` needs to be different from
139 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
140 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
141 tried.
142
143 ``*args`` and ``**kwargs`` are forwarded to the constructors.
144
145 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
146
147 .. versionchanged:: 2.2
148 Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
149 extension has not been implemented correctly).
150 """
151 if settings is None:
152 if crawler is None:
153 raise ValueError("Specify at least one of settings and crawler.")
154 settings = crawler.settings
155 if crawler and hasattr(objcls, 'from_crawler'):
156 instance = objcls.from_crawler(crawler, *args, **kwargs)
157 method_name = 'from_crawler'
158 elif hasattr(objcls, 'from_settings'):
159 instance = objcls.from_settings(settings, *args, **kwargs)
160 method_name = 'from_settings'
161 else:
162 instance = objcls(*args, **kwargs)
163 method_name = '__new__'
164 if instance is None:
165 raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
166 return instance
167
168
169 @contextmanager
170 def set_environ(**kwargs):
171 """Temporarily set environment variables inside the context manager and
172 fully restore previous environment afterwards
173 """
174
175 original_env = {k: os.environ.get(k) for k in kwargs}
176 os.environ.update(kwargs)
177 try:
178 yield
179 finally:
180 for k, v in original_env.items():
181 if v is None:
182 del os.environ[k]
183 else:
184 os.environ[k] = v
185
186
187 _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
188
189
190 def is_generator_with_return_value(callable):
191 """
192 Returns True if a callable is a generator function which includes a
193 'return' statement with a value different than None, False otherwise
194 """
195 if callable in _generator_callbacks_cache:
196 return _generator_callbacks_cache[callable]
197
198 def returns_none(return_node):
199 value = return_node.value
200 return value is None or isinstance(value, ast.NameConstant) and value.value is None
201
202 if inspect.isgeneratorfunction(callable):
203 tree = ast.parse(dedent(inspect.getsource(callable)))
204 for node in ast.walk(tree):
205 if isinstance(node, ast.Return) and not returns_none(node):
206 _generator_callbacks_cache[callable] = True
207 return _generator_callbacks_cache[callable]
208
209 _generator_callbacks_cache[callable] = False
210 return _generator_callbacks_cache[callable]
211
212
213 def warn_on_generator_with_return_value(spider, callable):
214 """
215 Logs a warning if a callable is a generator function and includes
216 a 'return' statement with a value different than None
217 """
218 if is_generator_with_return_value(callable):
219 warnings.warn(
220 'The "{}.{}" method is a generator and includes a "return" statement with a '
221 'value different than None. This could lead to unexpected behaviour. Please see '
222 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
223 'for details about the semantics of the "return" statement within generators'
224 .format(spider.__class__.__name__, callable.__name__), stacklevel=2,
225 )
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -5,6 +5,7 @@
import re
import hashlib
import warnings
+from collections import deque
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
@@ -184,6 +185,22 @@
os.environ[k] = v
+def walk_callable(node):
+ """Similar to ``ast.walk``, but walks only function body and skips nested
+ functions defined within the node.
+ """
+ todo = deque([node])
+ walked_func_def = False
+ while todo:
+ node = todo.popleft()
+ if isinstance(node, ast.FunctionDef):
+ if walked_func_def:
+ continue
+ walked_func_def = True
+ todo.extend(ast.iter_child_nodes(node))
+ yield node
+
+
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
@@ -201,7 +218,7 @@
if inspect.isgeneratorfunction(callable):
tree = ast.parse(dedent(inspect.getsource(callable)))
- for node in ast.walk(tree):
+ for node in walk_callable(tree):
if isinstance(node, ast.Return) and not returns_none(node):
_generator_callbacks_cache[callable] = True
return _generator_callbacks_cache[callable]
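To see the effect of the patch, the walker it introduces can be exercised directly. A quick self-contained check (the walker is copied from the diff above; the sample source is the issue's spider reduced to its essentials) might look like this:

```python
import ast
from collections import deque
from textwrap import dedent


def walk_callable(node):
    # Same walker as in the patch: descend into the outermost FunctionDef only,
    # skipping any function defined inside it.
    todo = deque([node])
    walked_func_def = False
    while todo:
        node = todo.popleft()
        if isinstance(node, ast.FunctionDef):
            if walked_func_def:
                continue
            walked_func_def = True
        todo.extend(ast.iter_child_nodes(node))
        yield node


src = dedent("""
    def parse(response):
        def is_external(url):
            return True
        yield {}
""")

tree = ast.parse(src)
hits = [node for node in walk_callable(tree)
        if isinstance(node, ast.Return) and node.value is not None]
print(len(hits))  # 0 -> the nested function's return no longer triggers the warning
```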
| {"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -5,6 +5,7 @@\n import re\n import hashlib\n import warnings\n+from collections import deque\n from contextlib import contextmanager\n from importlib import import_module\n from pkgutil import iter_modules\n@@ -184,6 +185,22 @@\n os.environ[k] = v\n \n \n+def walk_callable(node):\n+ \"\"\"Similar to ``ast.walk``, but walks only function body and skips nested\n+ functions defined within the node.\n+ \"\"\"\n+ todo = deque([node])\n+ walked_func_def = False\n+ while todo:\n+ node = todo.popleft()\n+ if isinstance(node, ast.FunctionDef):\n+ if walked_func_def:\n+ continue\n+ walked_func_def = True\n+ todo.extend(ast.iter_child_nodes(node))\n+ yield node\n+\n+\n _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n \n \n@@ -201,7 +218,7 @@\n \n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n- for node in ast.walk(tree):\n+ for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n", "issue": "Smarter generator check for combined yield/return statements: ignore nested functions\n## Summary\r\nCurrently, if spider methods are generators that yield results and contain nested function, then the following warning is issued:\r\n\r\n```\r\n[py.warnings] WARNING: /Library/Python/3.7/site-packages/scrapy/core/scraper.py:148: UserWarning: The \"MySpider.parse\" method is a generator and includes a \"return\" statement with a value different than None. This could lead to unexpected behaviour. Please see https://docs.python.org/3/reference/simple_stmts.html#the-return-statement for details about the semantics of the \"return\" statement within generators\r\n warn_on_generator_with_return_value(spider, callback)\r\n```\r\n\r\nThe example of a simple spider that results in the warning:\r\n```\r\nimport scrapy\r\n\r\nclass MySpider(scrapy.Spider):\r\n name = \"MySpider\"\r\n start_urls = [\"https://scrapy.org\"]\r\n \r\n def parse(self, response):\r\n \r\n def is_external(url):\r\n href = url.css('::attr(href)').get()\r\n return href.startswith('http') and 'scrapy.org' not in href\r\n \r\n links = [link for link in response.css('a') if is_external(link)]\r\n for link in links:\r\n yield {'link': link.css('::attr(href)').get(), 'text': link.css('::text').get()}\r\n```\r\n\r\nI know it's a bit artificial example as the nested function can be moved, but there is nothing wrong with nested function conceptually.\r\n\r\n## Motivation\r\n\r\nI have a midsize spider function that includes some nested helper functions that I'd like to keep close to where they are called.\r\n\r\n## Describe alternatives you've considered\r\n\r\nMoving nested function out of the generator is an easy fix, but it constrains expressivity of the code.\r\n\r\n## Additional context\r\n\r\nRelated function: is_generator_with_return_value\r\n\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom 
scrapy.item import _BaseItem\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\n\n\n_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n warnings.warn(\n \"scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.\",\n ScrapyDeprecationWarning,\n stacklevel=2\n )\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n .. versionchanged:: 2.2\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in ast.walk(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. 
Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}], "after_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom collections import deque\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import _BaseItem\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\n\n\n_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n warnings.warn(\n \"scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.\",\n ScrapyDeprecationWarning,\n stacklevel=2\n )\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n .. versionchanged:: 2.2\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. 
if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\ndef walk_callable(node):\n \"\"\"Similar to ``ast.walk``, but walks only function body and skips nested\n functions defined within the node.\n \"\"\"\n todo = deque([node])\n walked_func_def = False\n while todo:\n node = todo.popleft()\n if isinstance(node, ast.FunctionDef):\n if walked_func_def:\n continue\n walked_func_def = True\n todo.extend(ast.iter_child_nodes(node))\n yield node\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}]} | 2,961 | 320 |
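For context on the fix recorded above: the golden diff introduces a `walk_callable` helper that traverses only the outermost function body and skips nested `FunctionDef` nodes. The sketch below illustrates that behaviour in isolation; the `walk_callable` body mirrors the diff, while the sample source string (a spider-like `parse` with a nested helper) is a made-up example rather than real Scrapy code.

```python
import ast
from collections import deque
from textwrap import dedent

def walk_callable(node):
    # Breadth-first walk that stops descending once a second FunctionDef is seen,
    # so "return" statements inside nested helpers are never visited.
    todo = deque([node])
    walked_func_def = False
    while todo:
        node = todo.popleft()
        if isinstance(node, ast.FunctionDef):
            if walked_func_def:
                continue
            walked_func_def = True
        todo.extend(ast.iter_child_nodes(node))
        yield node

source = dedent("""
    def parse(self, response):
        def is_external(url):
            return True  # value-returning "return", but only inside the nested helper
        yield {"link": "https://example.org"}
    """)

def has_value_return(nodes):
    return any(isinstance(n, ast.Return) and n.value is not None for n in nodes)

tree = ast.parse(source)
print(has_value_return(ast.walk(tree)))       # True  -> the old check would warn
print(has_value_return(walk_callable(tree)))  # False -> nested return is ignored
```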
gh_patches_debug_29181 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
project_list in container add project form contains all projects
@CarolingerSeilchenspringer At the moment, all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way?
--- END ISSUE ---
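To make the question concrete, the sketch below shows the shape of the restriction being asked about -- limiting the selectable projects to the container's own organisation. It reuses field names that appear in the form and view code further down (`organisation`, `projectcontainer`), but it is only an illustration of the idea, not the actual patch.

```python
# Illustrative sketch only: narrow the "projects" choices to one organisation.
def restrict_project_choices(form, organisation):
    form.fields["projects"].queryset = (
        form.fields["projects"].queryset
        .filter(organisation=organisation)  # drop projects of other organisations
        .filter(projectcontainer=None)      # keep the existing container exclusion
        .order_by("name")
    )
```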
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projectcontainers/forms.py`
Content:
```
1 from django.db.models import Q
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard.forms import ProjectCreateForm
5 from adhocracy4.dashboard.forms import ProjectDashboardForm
6 from meinberlin.apps.contrib.widgets import Select2MultipleWidget
7
8 from . import models
9
10 LABELS = {
11 'name': _('Title of your container'),
12 'description': _('Short description of your container'),
13 'tile_image': _('Logo'),
14 }
15
16 HELP_TEXTS = {
17 'name': _('This title will appear on the '
18 'teaser card and on top of the container '
19 'detail page. It should be max. 120 characters long'),
20 'description': _('This short description will appear on '
21 'the header of the container and in the teaser. '
22 'It should briefly state the goal of the '
23 'projects in max. 250 chars.'),
24 'tile_image': _(
25 'The image will be shown in the container tile.'
26 ),
27 }
28
29
30 class ContainerCreateForm(ProjectCreateForm):
31
32 class Meta:
33 model = models.ProjectContainer
34 fields = ['name', 'description',
35 'tile_image', 'tile_image_copyright']
36 labels = LABELS
37 help_texts = HELP_TEXTS
38
39
40 class ContainerBasicForm(ProjectDashboardForm):
41
42 class Meta:
43 model = models.ProjectContainer
44 fields = ['name', 'description', 'tile_image',
45 'tile_image_copyright', 'is_archived']
46 required_for_project_publish = ['name', 'description']
47 labels = LABELS
48 help_texts = HELP_TEXTS
49
50
51 class ContainerInformationForm(ProjectDashboardForm):
52
53 class Meta:
54 model = models.ProjectContainer
55 fields = ['information']
56 required_for_project_publish = ['information']
57 labels = {
58 'information': _('Description of your container'),
59 }
60
61
62 class ContainerProjectsForm(ProjectDashboardForm):
63
64 def __init__(self, *args, **kwargs):
65 super().__init__(*args, **kwargs)
66
67 # We tried to find a good balance between a short list and
68 # all necessary projects. The details may change over time.
69 # Projects that are already selected should remain in the queryset.
70 self.fields['projects'].queryset = self.fields['projects'].queryset \
71 .filter(projectcontainer=None)\
72 .filter(Q(containers=self.instance) |
73 (Q(containers=None) &
74 Q(is_archived=False) &
75 Q(is_public=True)))\
76 .order_by('name')
77
78 class Meta:
79 model = models.ProjectContainer
80 fields = ['projects']
81 required_for_project_publish = ['projects']
82 widgets = {
83 'projects': Select2MultipleWidget,
84 }
85
```
Path: `meinberlin/apps/projectcontainers/views.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2 from django.views import generic
3
4 from adhocracy4.dashboard import mixins as dashboard_mixins
5 from adhocracy4.dashboard.blueprints import ProjectBlueprint
6 from adhocracy4.dashboard.components.forms.views import \
7 ProjectComponentFormView
8 from adhocracy4.dashboard.views import ProjectCreateView
9
10 from . import forms
11 from . import models
12
13
14 class ContainerCreateView(ProjectCreateView):
15 model = models.ProjectContainer
16 slug_url_kwarg = 'project_slug'
17 form_class = forms.ContainerCreateForm
18 template_name = 'meinberlin_projectcontainers/container_create_form.html'
19 success_message = _('Container successfully created.')
20
21 blueprint = ProjectBlueprint(
22 title=_('Container'),
23 description=_(
24 'A container contains multiple projects.'
25 ),
26 content=[],
27 image='',
28 settings_model=None,
29 )
30
31
32 class ContainerBasicFormView(ProjectComponentFormView):
33 model = models.ProjectContainer
34
35 @property
36 def project(self):
37 project = super().project
38 return project.projectcontainer
39
40 def get_object(self, queryset=None):
41 return self.project
42
43
44 class ContainerProjectsView(ProjectComponentFormView):
45 model = models.ProjectContainer
46
47 @property
48 def project(self):
49 project = super().project
50 return project.projectcontainer
51
52 def get_object(self, queryset=None):
53 return self.project
54
55
56 class ContainerListView(dashboard_mixins.DashboardBaseMixin,
57 generic.ListView):
58 model = models.ProjectContainer
59 paginate_by = 12
60 template_name = 'meinberlin_projectcontainers/container_list.html'
61 permission_required = 'a4projects.add_project'
62 menu_item = 'project'
63
64 def get_queryset(self):
65 return super().get_queryset().filter(
66 organisation=self.organisation
67 )
68
69 def get_permission_object(self):
70 return self.organisation
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py
--- a/meinberlin/apps/projectcontainers/forms.py
+++ b/meinberlin/apps/projectcontainers/forms.py
@@ -62,12 +62,20 @@
class ContainerProjectsForm(ProjectDashboardForm):
def __init__(self, *args, **kwargs):
+ self.user = kwargs.pop('user')
+ self.organisation = kwargs.pop('organisation')
super().__init__(*args, **kwargs)
- # We tried to find a good balance between a short list and
- # all necessary projects. The details may change over time.
- # Projects that are already selected should remain in the queryset.
- self.fields['projects'].queryset = self.fields['projects'].queryset \
+ projects = self.fields['projects']\
+ .queryset.filter(organisation=self.organisation)
+ if not self.organisation.has_initiator(self.user):
+ user_groups = self.user.groups.all()
+ org_groups = self.organisation.groups.all()
+ shared_groups = user_groups & org_groups
+ group = shared_groups.distinct().first()
+ projects = projects.filter(group=group)
+
+ self.fields['projects'].queryset = projects \
.filter(projectcontainer=None)\
.filter(Q(containers=self.instance) |
(Q(containers=None) &
diff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py
--- a/meinberlin/apps/projectcontainers/views.py
+++ b/meinberlin/apps/projectcontainers/views.py
@@ -52,6 +52,12 @@
def get_object(self, queryset=None):
return self.project
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs['user'] = self.request.user
+ kwargs['organisation'] = self.project.organisation
+ return kwargs
+
class ContainerListView(dashboard_mixins.DashboardBaseMixin,
generic.ListView):
| {"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py\n--- a/meinberlin/apps/projectcontainers/forms.py\n+++ b/meinberlin/apps/projectcontainers/forms.py\n@@ -62,12 +62,20 @@\n class ContainerProjectsForm(ProjectDashboardForm):\n \n def __init__(self, *args, **kwargs):\n+ self.user = kwargs.pop('user')\n+ self.organisation = kwargs.pop('organisation')\n super().__init__(*args, **kwargs)\n \n- # We tried to find a good balance between a short list and\n- # all necessary projects. The details may change over time.\n- # Projects that are already selected should remain in the queryset.\n- self.fields['projects'].queryset = self.fields['projects'].queryset \\\n+ projects = self.fields['projects']\\\n+ .queryset.filter(organisation=self.organisation)\n+ if not self.organisation.has_initiator(self.user):\n+ user_groups = self.user.groups.all()\n+ org_groups = self.organisation.groups.all()\n+ shared_groups = user_groups & org_groups\n+ group = shared_groups.distinct().first()\n+ projects = projects.filter(group=group)\n+\n+ self.fields['projects'].queryset = projects \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\ndiff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py\n--- a/meinberlin/apps/projectcontainers/views.py\n+++ b/meinberlin/apps/projectcontainers/views.py\n@@ -52,6 +52,12 @@\n def get_object(self, queryset=None):\n return self.project\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs['user'] = self.request.user\n+ kwargs['organisation'] = self.project.organisation\n+ return kwargs\n+\n \n class ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n", "issue": "project_list in container add project form contains all projects\n@CarolingerSeilchenspringer at the moment all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way?\r\n\n", "before_files": [{"content": "from django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectCreateForm\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom meinberlin.apps.contrib.widgets import Select2MultipleWidget\n\nfrom . import models\n\nLABELS = {\n 'name': _('Title of your container'),\n 'description': _('Short description of your container'),\n 'tile_image': _('Logo'),\n}\n\nHELP_TEXTS = {\n 'name': _('This title will appear on the '\n 'teaser card and on top of the container '\n 'detail page. It should be max. 120 characters long'),\n 'description': _('This short description will appear on '\n 'the header of the container and in the teaser. '\n 'It should briefly state the goal of the '\n 'projects in max. 
250 chars.'),\n 'tile_image': _(\n 'The image will be shown in the container tile.'\n ),\n}\n\n\nclass ContainerCreateForm(ProjectCreateForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description',\n 'tile_image', 'tile_image_copyright']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived']\n required_for_project_publish = ['name', 'description']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['information']\n required_for_project_publish = ['information']\n labels = {\n 'information': _('Description of your container'),\n }\n\n\nclass ContainerProjectsForm(ProjectDashboardForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # We tried to find a good balance between a short list and\n # all necessary projects. The details may change over time.\n # Projects that are already selected should remain in the queryset.\n self.fields['projects'].queryset = self.fields['projects'].queryset \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\n Q(is_archived=False) &\n Q(is_public=True)))\\\n .order_by('name')\n\n class Meta:\n model = models.ProjectContainer\n fields = ['projects']\n required_for_project_publish = ['projects']\n widgets = {\n 'projects': Select2MultipleWidget,\n }\n", "path": "meinberlin/apps/projectcontainers/forms.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.views import ProjectCreateView\n\nfrom . import forms\nfrom . 
import models\n\n\nclass ContainerCreateView(ProjectCreateView):\n model = models.ProjectContainer\n slug_url_kwarg = 'project_slug'\n form_class = forms.ContainerCreateForm\n template_name = 'meinberlin_projectcontainers/container_create_form.html'\n success_message = _('Container successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Container'),\n description=_(\n 'A container contains multiple projects.'\n ),\n content=[],\n image='',\n settings_model=None,\n )\n\n\nclass ContainerBasicFormView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerProjectsView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = models.ProjectContainer\n paginate_by = 12\n template_name = 'meinberlin_projectcontainers/container_list.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n return super().get_queryset().filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/projectcontainers/views.py"}], "after_files": [{"content": "from django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectCreateForm\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom meinberlin.apps.contrib.widgets import Select2MultipleWidget\n\nfrom . import models\n\nLABELS = {\n 'name': _('Title of your container'),\n 'description': _('Short description of your container'),\n 'tile_image': _('Logo'),\n}\n\nHELP_TEXTS = {\n 'name': _('This title will appear on the '\n 'teaser card and on top of the container '\n 'detail page. It should be max. 120 characters long'),\n 'description': _('This short description will appear on '\n 'the header of the container and in the teaser. '\n 'It should briefly state the goal of the '\n 'projects in max. 
250 chars.'),\n 'tile_image': _(\n 'The image will be shown in the container tile.'\n ),\n}\n\n\nclass ContainerCreateForm(ProjectCreateForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description',\n 'tile_image', 'tile_image_copyright']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived']\n required_for_project_publish = ['name', 'description']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['information']\n required_for_project_publish = ['information']\n labels = {\n 'information': _('Description of your container'),\n }\n\n\nclass ContainerProjectsForm(ProjectDashboardForm):\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.organisation = kwargs.pop('organisation')\n super().__init__(*args, **kwargs)\n\n projects = self.fields['projects']\\\n .queryset.filter(organisation=self.organisation)\n if not self.organisation.has_initiator(self.user):\n user_groups = self.user.groups.all()\n org_groups = self.organisation.groups.all()\n shared_groups = user_groups & org_groups\n group = shared_groups.distinct().first()\n projects = projects.filter(group=group)\n\n self.fields['projects'].queryset = projects \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\n Q(is_archived=False) &\n Q(is_public=True)))\\\n .order_by('name')\n\n class Meta:\n model = models.ProjectContainer\n fields = ['projects']\n required_for_project_publish = ['projects']\n widgets = {\n 'projects': Select2MultipleWidget,\n }\n", "path": "meinberlin/apps/projectcontainers/forms.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.views import ProjectCreateView\n\nfrom . import forms\nfrom . 
import models\n\n\nclass ContainerCreateView(ProjectCreateView):\n model = models.ProjectContainer\n slug_url_kwarg = 'project_slug'\n form_class = forms.ContainerCreateForm\n template_name = 'meinberlin_projectcontainers/container_create_form.html'\n success_message = _('Container successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Container'),\n description=_(\n 'A container contains multiple projects.'\n ),\n content=[],\n image='',\n settings_model=None,\n )\n\n\nclass ContainerBasicFormView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerProjectsView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n kwargs['organisation'] = self.project.organisation\n return kwargs\n\n\nclass ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = models.ProjectContainer\n paginate_by = 12\n template_name = 'meinberlin_projectcontainers/container_list.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n return super().get_queryset().filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/projectcontainers/views.py"}]} | 1,604 | 443 |
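One practical consequence of the patch above: `ContainerProjectsForm.__init__` now pops `user` and `organisation` from its keyword arguments, so any code that instantiates the form directly (tests, for example) must supply both alongside the usual ModelForm arguments. A hypothetical usage sketch with placeholder objects:

```python
# Placeholder objects (container, project, request) stand in for real fixtures.
form = ContainerProjectsForm(
    instance=container,                    # an existing ProjectContainer
    data={"projects": [project.pk]},       # selected project ids
    user=request.user,                     # consumed by the new __init__
    organisation=container.organisation,   # consumed by the new __init__
)
assert form.is_valid()
```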
gh_patches_debug_6159 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Invalid MarkdownV1 parsing
### Steps to Reproduce
1. Call the `mention_markdown(123, "Keeping_the_underscores", version=1)` function from `telegram.helpers`
### Expected behaviour
`[Keeping_the_underscores](tg://user?id=123)`, which is valid MarkdownV1 and consistent with https://core.telegram.org/bots/api#markdown-style.

### Actual behaviour

`[Keeping\\_the\\_underscores](tg://user?id=123)`. However, this fails to parse as well-formed MarkdownV1 -- in fact, Telegram responds with an error if it occurs in a message with `parse_mode = Markdown`.
### Operating System
Linux
### Version of Python, python-telegram-bot & dependencies
```shell
python-telegram-bot==20.0a2
```
### Relevant log output
_No response_
### Additional Context
_No response_
--- END ISSUE ---
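For reference, the report can be reproduced in a few lines, assuming python-telegram-bot 20.0a2 is installed; the comments show the actual string values (single backslashes in the string itself, doubled above only because of escaping).

```python
from telegram.helpers import mention_markdown

mention = mention_markdown(123, "Keeping_the_underscores", version=1)
print(mention)
# Actual output:            [Keeping\_the\_underscores](tg://user?id=123)
# Expected (per the issue): [Keeping_the_underscores](tg://user?id=123)
```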
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/helpers.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2022
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains convenience helper functions.
20
21 .. versionchanged:: 20.0
22 Previously, the contents of this module were available through the (no longer existing)
23 module ``telegram.utils.helpers``.
24 """
25
26 __all__ = (
27 "create_deep_linked_url",
28 "effective_message_type",
29 "escape_markdown",
30 "mention_html",
31 "mention_markdown",
32 )
33
34 import re
35 from html import escape
36 from typing import TYPE_CHECKING, Optional, Union
37
38 from telegram.constants import MessageType
39
40 if TYPE_CHECKING:
41 from telegram import Message, Update
42
43
44 def escape_markdown(text: str, version: int = 1, entity_type: str = None) -> str:
45 """Helper function to escape telegram markup symbols.
46
47 Args:
48 text (:obj:`str`): The text.
49 version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.
50 Either ``1`` or ``2``. Defaults to ``1``.
51 entity_type (:obj:`str`, optional): For the entity types
52 :tg-const:`telegram.MessageEntity.PRE`, :tg-const:`telegram.MessageEntity.CODE` and
53 the link part of :tg-const:`telegram.MessageEntity.TEXT_LINK`, only certain characters
54 need to be escaped in :tg-const:`telegram.constants.ParseMode.MARKDOWN_V2`.
55 See the official API documentation for details. Only valid in combination with
56 ``version=2``, will be ignored else.
57 """
58 if int(version) == 1:
59 escape_chars = r"_*`["
60 elif int(version) == 2:
61 if entity_type in ["pre", "code"]:
62 escape_chars = r"\`"
63 elif entity_type == "text_link":
64 escape_chars = r"\)"
65 else:
66 escape_chars = r"\_*[]()~`>#+-=|{}.!"
67 else:
68 raise ValueError("Markdown version must be either 1 or 2!")
69
70 return re.sub(f"([{re.escape(escape_chars)}])", r"\\\1", text)
71
72
73 def mention_html(user_id: Union[int, str], name: str) -> str:
74 """
75 Args:
76 user_id (:obj:`int`): The user's id which you want to mention.
77 name (:obj:`str`): The name the mention is showing.
78
79 Returns:
80 :obj:`str`: The inline mention for the user as HTML.
81 """
82 return f'<a href="tg://user?id={user_id}">{escape(name)}</a>'
83
84
85 def mention_markdown(user_id: Union[int, str], name: str, version: int = 1) -> str:
86 """
87 Args:
88 user_id (:obj:`int`): The user's id which you want to mention.
89 name (:obj:`str`): The name the mention is showing.
90 version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.
91 Either ``1`` or ``2``. Defaults to ``1``.
92
93 Returns:
94 :obj:`str`: The inline mention for the user as Markdown.
95 """
96 return f"[{escape_markdown(name, version=version)}](tg://user?id={user_id})"
97
98
99 def effective_message_type(entity: Union["Message", "Update"]) -> Optional[str]:
100 """
101 Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
102 :class:`telegram.Update`.
103
104 Args:
105 entity (:class:`telegram.Update` | :class:`telegram.Message`): The ``update`` or
106 ``message`` to extract from.
107
108 Returns:
109 :obj:`str` | :obj:`None`: One of :class:`telegram.constants.MessageType` if the entity
110 contains a message that matches one of those types. :obj:`None` otherwise.
111
112 """
113 # Importing on file-level yields cyclic Import Errors
114 from telegram import Message, Update # pylint: disable=import-outside-toplevel
115
116 if isinstance(entity, Message):
117 message = entity
118 elif isinstance(entity, Update):
119 if not entity.effective_message:
120 return None
121 message = entity.effective_message
122 else:
123 raise TypeError(f"The entity is neither Message nor Update (got: {type(entity)})")
124
125 for message_type in MessageType:
126 if message[message_type]:
127 return message_type
128
129 return None
130
131
132 def create_deep_linked_url(bot_username: str, payload: str = None, group: bool = False) -> str:
133 """
134 Creates a deep-linked URL for this :paramref:`bot_username` with the specified
135 :paramref:`payload`. See https://core.telegram.org/bots#deep-linking to learn more.
136
137 The :paramref:`payload` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``
138
139 Note:
140 Works well in conjunction with
141 ``CommandHandler("start", callback, filters=filters.Regex('payload'))``
142
143 Examples:
144 ``create_deep_linked_url(bot.get_me().username, "some-params")``
145
146 Args:
147 bot_username (:obj:`str`): The username to link to
148 payload (:obj:`str`, optional): Parameters to encode in the created URL
149 group (:obj:`bool`, optional): If :obj:`True` the user is prompted to select a group to
150 add the bot to. If :obj:`False`, opens a one-on-one conversation with the bot.
151 Defaults to :obj:`False`.
152
153 Returns:
154 :obj:`str`: An URL to start the bot with specific parameters
155 """
156 if bot_username is None or len(bot_username) <= 3:
157 raise ValueError("You must provide a valid bot_username.")
158
159 base_url = f"https://t.me/{bot_username}"
160 if not payload:
161 return base_url
162
163 if len(payload) > 64:
164 raise ValueError("The deep-linking payload must not exceed 64 characters.")
165
166 if not re.match(r"^[A-Za-z0-9_-]+$", payload):
167 raise ValueError(
168 "Only the following characters are allowed for deep-linked "
169 "URLs: A-Z, a-z, 0-9, _ and -"
170 )
171
172 if group:
173 key = "startgroup"
174 else:
175 key = "start"
176
177 return f"{base_url}?{key}={payload}"
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/helpers.py b/telegram/helpers.py
--- a/telegram/helpers.py
+++ b/telegram/helpers.py
@@ -93,7 +93,10 @@
Returns:
:obj:`str`: The inline mention for the user as Markdown.
"""
- return f"[{escape_markdown(name, version=version)}](tg://user?id={user_id})"
+ tg_link = f"tg://user?id={user_id}"
+ if version == 1:
+ return f"[{name}]({tg_link})"
+ return f"[{escape_markdown(name, version=version)}]({tg_link})"
def effective_message_type(entity: Union["Message", "Update"]) -> Optional[str]:
| {"golden_diff": "diff --git a/telegram/helpers.py b/telegram/helpers.py\n--- a/telegram/helpers.py\n+++ b/telegram/helpers.py\n@@ -93,7 +93,10 @@\n Returns:\n :obj:`str`: The inline mention for the user as Markdown.\n \"\"\"\n- return f\"[{escape_markdown(name, version=version)}](tg://user?id={user_id})\"\n+ tg_link = f\"tg://user?id={user_id}\"\n+ if version == 1:\n+ return f\"[{name}]({tg_link})\"\n+ return f\"[{escape_markdown(name, version=version)}]({tg_link})\"\n \n \n def effective_message_type(entity: Union[\"Message\", \"Update\"]) -> Optional[str]:\n", "issue": "[BUG] Invalid MarkdownV1 parsing\n### Steps to Reproduce\r\n\r\n1. Call the `mention_markdown(123, \"Keeping_the_underscores\", version=1)` function from `telegram.helpers`\r\n\r\n### Expected behaviour\r\n\r\n`[Keeping_the_underscores](tg://user?id=123)`, that is valid MarkdownV1 and consistent with https://core.telegram.org/bots/api#markdown-style.\r\n\r\n### Actual behaviour\r\n\r\n`[Keeping\\\\_the\\\\_underscores](tg://user?id=123)`. However this will fail to parse into well-formed MarkdownV1 -- in fact Telegram will respond with an error if it occurs in a message with `parse_mode = Markdown`.\r\n\r\n### Operating System\r\n\r\nLinux\r\n\r\n### Version of Python, python-telegram-bot & dependencies\r\n\r\n```shell\r\npython-telegram-bot==20.0a2\r\n```\r\n\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains convenience helper functions.\n\n.. versionchanged:: 20.0\n Previously, the contents of this module were available through the (no longer existing)\n module ``telegram.utils.helpers``.\n\"\"\"\n\n__all__ = (\n \"create_deep_linked_url\",\n \"effective_message_type\",\n \"escape_markdown\",\n \"mention_html\",\n \"mention_markdown\",\n)\n\nimport re\nfrom html import escape\nfrom typing import TYPE_CHECKING, Optional, Union\n\nfrom telegram.constants import MessageType\n\nif TYPE_CHECKING:\n from telegram import Message, Update\n\n\ndef escape_markdown(text: str, version: int = 1, entity_type: str = None) -> str:\n \"\"\"Helper function to escape telegram markup symbols.\n\n Args:\n text (:obj:`str`): The text.\n version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n entity_type (:obj:`str`, optional): For the entity types\n :tg-const:`telegram.MessageEntity.PRE`, :tg-const:`telegram.MessageEntity.CODE` and\n the link part of :tg-const:`telegram.MessageEntity.TEXT_LINK`, only certain characters\n need to be escaped in :tg-const:`telegram.constants.ParseMode.MARKDOWN_V2`.\n See the official API documentation for details. 
Only valid in combination with\n ``version=2``, will be ignored else.\n \"\"\"\n if int(version) == 1:\n escape_chars = r\"_*`[\"\n elif int(version) == 2:\n if entity_type in [\"pre\", \"code\"]:\n escape_chars = r\"\\`\"\n elif entity_type == \"text_link\":\n escape_chars = r\"\\)\"\n else:\n escape_chars = r\"\\_*[]()~`>#+-=|{}.!\"\n else:\n raise ValueError(\"Markdown version must be either 1 or 2!\")\n\n return re.sub(f\"([{re.escape(escape_chars)}])\", r\"\\\\\\1\", text)\n\n\ndef mention_html(user_id: Union[int, str], name: str) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n return f'<a href=\"tg://user?id={user_id}\">{escape(name)}</a>'\n\n\ndef mention_markdown(user_id: Union[int, str], name: str, version: int = 1) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n\n Returns:\n :obj:`str`: The inline mention for the user as Markdown.\n \"\"\"\n return f\"[{escape_markdown(name, version=version)}](tg://user?id={user_id})\"\n\n\ndef effective_message_type(entity: Union[\"Message\", \"Update\"]) -> Optional[str]:\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:class:`telegram.Update` | :class:`telegram.Message`): The ``update`` or\n ``message`` to extract from.\n\n Returns:\n :obj:`str` | :obj:`None`: One of :class:`telegram.constants.MessageType` if the entity\n contains a message that matches one of those types. :obj:`None` otherwise.\n\n \"\"\"\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message, Update # pylint: disable=import-outside-toplevel\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n if not entity.effective_message:\n return None\n message = entity.effective_message\n else:\n raise TypeError(f\"The entity is neither Message nor Update (got: {type(entity)})\")\n\n for message_type in MessageType:\n if message[message_type]:\n return message_type\n\n return None\n\n\ndef create_deep_linked_url(bot_username: str, payload: str = None, group: bool = False) -> str:\n \"\"\"\n Creates a deep-linked URL for this :paramref:`bot_username` with the specified\n :paramref:`payload`. See https://core.telegram.org/bots#deep-linking to learn more.\n\n The :paramref:`payload` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler(\"start\", callback, filters=filters.Regex('payload'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, \"some-params\")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If :obj:`True` the user is prompted to select a group to\n add the bot to. 
If :obj:`False`, opens a one-on-one conversation with the bot.\n Defaults to :obj:`False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n \"\"\"\n if bot_username is None or len(bot_username) <= 3:\n raise ValueError(\"You must provide a valid bot_username.\")\n\n base_url = f\"https://t.me/{bot_username}\"\n if not payload:\n return base_url\n\n if len(payload) > 64:\n raise ValueError(\"The deep-linking payload must not exceed 64 characters.\")\n\n if not re.match(r\"^[A-Za-z0-9_-]+$\", payload):\n raise ValueError(\n \"Only the following characters are allowed for deep-linked \"\n \"URLs: A-Z, a-z, 0-9, _ and -\"\n )\n\n if group:\n key = \"startgroup\"\n else:\n key = \"start\"\n\n return f\"{base_url}?{key}={payload}\"\n", "path": "telegram/helpers.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains convenience helper functions.\n\n.. versionchanged:: 20.0\n Previously, the contents of this module were available through the (no longer existing)\n module ``telegram.utils.helpers``.\n\"\"\"\n\n__all__ = (\n \"create_deep_linked_url\",\n \"effective_message_type\",\n \"escape_markdown\",\n \"mention_html\",\n \"mention_markdown\",\n)\n\nimport re\nfrom html import escape\nfrom typing import TYPE_CHECKING, Optional, Union\n\nfrom telegram.constants import MessageType\n\nif TYPE_CHECKING:\n from telegram import Message, Update\n\n\ndef escape_markdown(text: str, version: int = 1, entity_type: str = None) -> str:\n \"\"\"Helper function to escape telegram markup symbols.\n\n Args:\n text (:obj:`str`): The text.\n version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n entity_type (:obj:`str`, optional): For the entity types\n :tg-const:`telegram.MessageEntity.PRE`, :tg-const:`telegram.MessageEntity.CODE` and\n the link part of :tg-const:`telegram.MessageEntity.TEXT_LINK`, only certain characters\n need to be escaped in :tg-const:`telegram.constants.ParseMode.MARKDOWN_V2`.\n See the official API documentation for details. 
Only valid in combination with\n ``version=2``, will be ignored else.\n \"\"\"\n if int(version) == 1:\n escape_chars = r\"_*`[\"\n elif int(version) == 2:\n if entity_type in [\"pre\", \"code\"]:\n escape_chars = r\"\\`\"\n elif entity_type == \"text_link\":\n escape_chars = r\"\\)\"\n else:\n escape_chars = r\"\\_*[]()~`>#+-=|{}.!\"\n else:\n raise ValueError(\"Markdown version must be either 1 or 2!\")\n\n return re.sub(f\"([{re.escape(escape_chars)}])\", r\"\\\\\\1\", text)\n\n\ndef mention_html(user_id: Union[int, str], name: str) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n return f'<a href=\"tg://user?id={user_id}\">{escape(name)}</a>'\n\n\ndef mention_markdown(user_id: Union[int, str], name: str, version: int = 1) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n\n Returns:\n :obj:`str`: The inline mention for the user as Markdown.\n \"\"\"\n tg_link = f\"tg://user?id={user_id}\"\n if version == 1:\n return f\"[{name}]({tg_link})\"\n return f\"[{escape_markdown(name, version=version)}]({tg_link})\"\n\n\ndef effective_message_type(entity: Union[\"Message\", \"Update\"]) -> Optional[str]:\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:class:`telegram.Update` | :class:`telegram.Message`): The ``update`` or\n ``message`` to extract from.\n\n Returns:\n :obj:`str` | :obj:`None`: One of :class:`telegram.constants.MessageType` if the entity\n contains a message that matches one of those types. :obj:`None` otherwise.\n\n \"\"\"\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message, Update # pylint: disable=import-outside-toplevel\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n if not entity.effective_message:\n return None\n message = entity.effective_message\n else:\n raise TypeError(f\"The entity is neither Message nor Update (got: {type(entity)})\")\n\n for message_type in MessageType:\n if message[message_type]:\n return message_type\n\n return None\n\n\ndef create_deep_linked_url(bot_username: str, payload: str = None, group: bool = False) -> str:\n \"\"\"\n Creates a deep-linked URL for this :paramref:`bot_username` with the specified\n :paramref:`payload`. See https://core.telegram.org/bots#deep-linking to learn more.\n\n The :paramref:`payload` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler(\"start\", callback, filters=filters.Regex('payload'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, \"some-params\")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If :obj:`True` the user is prompted to select a group to\n add the bot to. 
If :obj:`False`, opens a one-on-one conversation with the bot.\n Defaults to :obj:`False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n \"\"\"\n if bot_username is None or len(bot_username) <= 3:\n raise ValueError(\"You must provide a valid bot_username.\")\n\n base_url = f\"https://t.me/{bot_username}\"\n if not payload:\n return base_url\n\n if len(payload) > 64:\n raise ValueError(\"The deep-linking payload must not exceed 64 characters.\")\n\n if not re.match(r\"^[A-Za-z0-9_-]+$\", payload):\n raise ValueError(\n \"Only the following characters are allowed for deep-linked \"\n \"URLs: A-Z, a-z, 0-9, _ and -\"\n )\n\n if group:\n key = \"startgroup\"\n else:\n key = \"start\"\n\n return f\"{base_url}?{key}={payload}\"\n", "path": "telegram/helpers.py"}]} | 2,467 | 158 |
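With the patch above applied, both Markdown versions can be sanity-checked as follows; the commented outputs follow from the new code path, where version 1 leaves the name untouched and version 2 still escapes it via `escape_markdown`.

```python
from telegram.helpers import mention_markdown

print(mention_markdown(123, "Keeping_the_underscores", version=1))
# [Keeping_the_underscores](tg://user?id=123)

print(mention_markdown(123, "Keeping_the_underscores", version=2))
# [Keeping\_the\_underscores](tg://user?id=123)
```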
gh_patches_debug_6613 | rasdani/github-patches | git_diff | CTFd__CTFd-2515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting dynamic challenges by ID does not return decay function
When getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the response does not include the challenge's decay function.

This seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function, which does not include `challenge.function`.

This seems to have been missed when implementing different decay functions for dynamic challenges. It does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response.
--- END ISSUE ---
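To make the gap visible: the dictionary assembled by `DynamicValueChallenge.read()` (shown in the file below) exposes `initial`, `decay` and `minimum` but omits `function`, so an API client cannot tell which decay curve applies. A hypothetical response fragment with placeholder values:

```python
# Hypothetical /api/v1/challenges/<id> data for a dynamic challenge (placeholder values).
data = {
    "id": 1,
    "name": "example-dynamic-challenge",
    "value": 450,
    "initial": 500,
    "decay": 10,
    "minimum": 100,
    # "function": "logarithmic",  # stored on the model (its default), absent from the response
    "type": "dynamic",
}
```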
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/plugins/dynamic_challenges/__init__.py`
Content:
```
1 from flask import Blueprint
2
3 from CTFd.models import Challenges, db
4 from CTFd.plugins import register_plugin_assets_directory
5 from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
6 from CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic
7 from CTFd.plugins.migrations import upgrade
8
9
10 class DynamicChallenge(Challenges):
11 __mapper_args__ = {"polymorphic_identity": "dynamic"}
12 id = db.Column(
13 db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True
14 )
15 initial = db.Column(db.Integer, default=0)
16 minimum = db.Column(db.Integer, default=0)
17 decay = db.Column(db.Integer, default=0)
18 function = db.Column(db.String(32), default="logarithmic")
19
20 def __init__(self, *args, **kwargs):
21 super(DynamicChallenge, self).__init__(**kwargs)
22 self.value = kwargs["initial"]
23
24
25 class DynamicValueChallenge(BaseChallenge):
26 id = "dynamic" # Unique identifier used to register challenges
27 name = "dynamic" # Name of a challenge type
28 templates = (
29 { # Handlebars templates used for each aspect of challenge editing & viewing
30 "create": "/plugins/dynamic_challenges/assets/create.html",
31 "update": "/plugins/dynamic_challenges/assets/update.html",
32 "view": "/plugins/dynamic_challenges/assets/view.html",
33 }
34 )
35 scripts = { # Scripts that are loaded when a template is loaded
36 "create": "/plugins/dynamic_challenges/assets/create.js",
37 "update": "/plugins/dynamic_challenges/assets/update.js",
38 "view": "/plugins/dynamic_challenges/assets/view.js",
39 }
40 # Route at which files are accessible. This must be registered using register_plugin_assets_directory()
41 route = "/plugins/dynamic_challenges/assets/"
42 # Blueprint used to access the static_folder directory.
43 blueprint = Blueprint(
44 "dynamic_challenges",
45 __name__,
46 template_folder="templates",
47 static_folder="assets",
48 )
49 challenge_model = DynamicChallenge
50
51 @classmethod
52 def calculate_value(cls, challenge):
53 f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)
54 value = f(challenge)
55
56 challenge.value = value
57 db.session.commit()
58 return challenge
59
60 @classmethod
61 def read(cls, challenge):
62 """
63 This method is in used to access the data of a challenge in a format processable by the front end.
64
65 :param challenge:
66 :return: Challenge object, data dictionary to be returned to the user
67 """
68 challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
69 data = {
70 "id": challenge.id,
71 "name": challenge.name,
72 "value": challenge.value,
73 "initial": challenge.initial,
74 "decay": challenge.decay,
75 "minimum": challenge.minimum,
76 "description": challenge.description,
77 "connection_info": challenge.connection_info,
78 "next_id": challenge.next_id,
79 "category": challenge.category,
80 "state": challenge.state,
81 "max_attempts": challenge.max_attempts,
82 "type": challenge.type,
83 "type_data": {
84 "id": cls.id,
85 "name": cls.name,
86 "templates": cls.templates,
87 "scripts": cls.scripts,
88 },
89 }
90 return data
91
92 @classmethod
93 def update(cls, challenge, request):
94 """
95 This method is used to update the information associated with a challenge. This should be kept strictly to the
96 Challenges table and any child tables.
97
98 :param challenge:
99 :param request:
100 :return:
101 """
102 data = request.form or request.get_json()
103
104 for attr, value in data.items():
105 # We need to set these to floats so that the next operations don't operate on strings
106 if attr in ("initial", "minimum", "decay"):
107 value = float(value)
108 setattr(challenge, attr, value)
109
110 return DynamicValueChallenge.calculate_value(challenge)
111
112 @classmethod
113 def solve(cls, user, team, challenge, request):
114 super().solve(user, team, challenge, request)
115
116 DynamicValueChallenge.calculate_value(challenge)
117
118
119 def load(app):
120 upgrade(plugin_name="dynamic_challenges")
121 CHALLENGE_CLASSES["dynamic"] = DynamicValueChallenge
122 register_plugin_assets_directory(
123 app, base_path="/plugins/dynamic_challenges/assets/"
124 )
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py
--- a/CTFd/plugins/dynamic_challenges/__init__.py
+++ b/CTFd/plugins/dynamic_challenges/__init__.py
@@ -73,6 +73,7 @@
"initial": challenge.initial,
"decay": challenge.decay,
"minimum": challenge.minimum,
+ "function": challenge.function,
"description": challenge.description,
"connection_info": challenge.connection_info,
"next_id": challenge.next_id,
| {"golden_diff": "diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py\n--- a/CTFd/plugins/dynamic_challenges/__init__.py\n+++ b/CTFd/plugins/dynamic_challenges/__init__.py\n@@ -73,6 +73,7 @@\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n+ \"function\": challenge.function,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n", "issue": "Getting dynamic challenges by ID does not return decay function\nWhen getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the challenge does not return its decay function.\r\n\r\nThis seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function which does not include `challenge.function`.\r\n\r\nThis seems be have been missed out when implementing different decay functions for dynamic challenges. This does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response.\r\n\n", "before_files": [{"content": "from flask import Blueprint\n\nfrom CTFd.models import Challenges, db\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge\nfrom CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic\nfrom CTFd.plugins.migrations import upgrade\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {\"polymorphic_identity\": \"dynamic\"}\n id = db.Column(\n db.Integer, db.ForeignKey(\"challenges.id\", ondelete=\"CASCADE\"), primary_key=True\n )\n initial = db.Column(db.Integer, default=0)\n minimum = db.Column(db.Integer, default=0)\n decay = db.Column(db.Integer, default=0)\n function = db.Column(db.String(32), default=\"logarithmic\")\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.value = kwargs[\"initial\"]\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = \"dynamic\" # Name of a challenge type\n templates = (\n { # Handlebars templates used for each aspect of challenge editing & viewing\n \"create\": \"/plugins/dynamic_challenges/assets/create.html\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.html\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.html\",\n }\n )\n scripts = { # Scripts that are loaded when a template is loaded\n \"create\": \"/plugins/dynamic_challenges/assets/create.js\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.js\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.js\",\n }\n # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory()\n route = \"/plugins/dynamic_challenges/assets/\"\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint(\n \"dynamic_challenges\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"assets\",\n )\n challenge_model = DynamicChallenge\n\n @classmethod\n def calculate_value(cls, challenge):\n f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)\n value = f(challenge)\n\n challenge.value = value\n db.session.commit()\n return challenge\n\n @classmethod\n def read(cls, challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n \"id\": challenge.id,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n \"category\": challenge.category,\n \"state\": challenge.state,\n \"max_attempts\": challenge.max_attempts,\n \"type\": challenge.type,\n \"type_data\": {\n \"id\": cls.id,\n \"name\": cls.name,\n \"templates\": cls.templates,\n \"scripts\": cls.scripts,\n },\n }\n return data\n\n @classmethod\n def update(cls, challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n\n for attr, value in data.items():\n # We need to set these to floats so that the next operations don't operate on strings\n if attr in (\"initial\", \"minimum\", \"decay\"):\n value = float(value)\n setattr(challenge, attr, value)\n\n return DynamicValueChallenge.calculate_value(challenge)\n\n @classmethod\n def solve(cls, user, team, challenge, request):\n super().solve(user, team, challenge, request)\n\n DynamicValueChallenge.calculate_value(challenge)\n\n\ndef load(app):\n upgrade(plugin_name=\"dynamic_challenges\")\n CHALLENGE_CLASSES[\"dynamic\"] = DynamicValueChallenge\n register_plugin_assets_directory(\n app, base_path=\"/plugins/dynamic_challenges/assets/\"\n )\n", "path": "CTFd/plugins/dynamic_challenges/__init__.py"}], "after_files": [{"content": "from flask import Blueprint\n\nfrom CTFd.models import Challenges, db\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge\nfrom CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic\nfrom CTFd.plugins.migrations import upgrade\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {\"polymorphic_identity\": \"dynamic\"}\n id = db.Column(\n db.Integer, db.ForeignKey(\"challenges.id\", ondelete=\"CASCADE\"), primary_key=True\n )\n initial = db.Column(db.Integer, default=0)\n minimum = db.Column(db.Integer, default=0)\n decay = db.Column(db.Integer, default=0)\n function = db.Column(db.String(32), default=\"logarithmic\")\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.value = kwargs[\"initial\"]\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = 
\"dynamic\" # Name of a challenge type\n templates = (\n { # Handlebars templates used for each aspect of challenge editing & viewing\n \"create\": \"/plugins/dynamic_challenges/assets/create.html\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.html\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.html\",\n }\n )\n scripts = { # Scripts that are loaded when a template is loaded\n \"create\": \"/plugins/dynamic_challenges/assets/create.js\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.js\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.js\",\n }\n # Route at which files are accessible. This must be registered using register_plugin_assets_directory()\n route = \"/plugins/dynamic_challenges/assets/\"\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint(\n \"dynamic_challenges\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"assets\",\n )\n challenge_model = DynamicChallenge\n\n @classmethod\n def calculate_value(cls, challenge):\n f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)\n value = f(challenge)\n\n challenge.value = value\n db.session.commit()\n return challenge\n\n @classmethod\n def read(cls, challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n \"id\": challenge.id,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n \"function\": challenge.function,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n \"category\": challenge.category,\n \"state\": challenge.state,\n \"max_attempts\": challenge.max_attempts,\n \"type\": challenge.type,\n \"type_data\": {\n \"id\": cls.id,\n \"name\": cls.name,\n \"templates\": cls.templates,\n \"scripts\": cls.scripts,\n },\n }\n return data\n\n @classmethod\n def update(cls, challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n\n for attr, value in data.items():\n # We need to set these to floats so that the next operations don't operate on strings\n if attr in (\"initial\", \"minimum\", \"decay\"):\n value = float(value)\n setattr(challenge, attr, value)\n\n return DynamicValueChallenge.calculate_value(challenge)\n\n @classmethod\n def solve(cls, user, team, challenge, request):\n super().solve(user, team, challenge, request)\n\n DynamicValueChallenge.calculate_value(challenge)\n\n\ndef load(app):\n upgrade(plugin_name=\"dynamic_challenges\")\n CHALLENGE_CLASSES[\"dynamic\"] = DynamicValueChallenge\n register_plugin_assets_directory(\n app, base_path=\"/plugins/dynamic_challenges/assets/\"\n )\n", "path": "CTFd/plugins/dynamic_challenges/__init__.py"}]} | 1,623 | 127 |
gh_patches_debug_9313 | rasdani/github-patches | git_diff | facebookresearch__hydra-1364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Ax-Plugin] Add support for Python 3.9
Python 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676
Related to #1062
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_ax_sweeper/setup.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from setuptools import find_namespace_packages, setup
4
5 with open("README.md", "r") as fh:
6 LONG_DESC = fh.read()
7 setup(
8 name="hydra-ax-sweeper",
9 version="1.1.0rc1",
10 author="Omry Yadan, Shagun Sodhani",
11 author_email="[email protected], [email protected]",
12 description="Hydra Ax Sweeper plugin",
13 long_description=LONG_DESC,
14 long_description_content_type="text/markdown",
15 url="https://github.com/facebookresearch/hydra/",
16 packages=find_namespace_packages(include=["hydra_plugins.*"]),
17 classifiers=[
18 "License :: OSI Approved :: MIT License",
19 "Programming Language :: Python :: 3.7",
20 "Programming Language :: Python :: 3.8",
21 # "Programming Language :: Python :: 3.9",
22 "Operating System :: POSIX :: Linux",
23 "Operating System :: MacOS",
24 "Development Status :: 4 - Beta",
25 ],
26 install_requires=["hydra-core>=1.0.0", "ax-platform>=0.1.13"],
27 include_package_data=True,
28 )
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py
--- a/plugins/hydra_ax_sweeper/setup.py
+++ b/plugins/hydra_ax_sweeper/setup.py
@@ -18,7 +18,7 @@
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
- # "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.9",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS",
"Development Status :: 4 - Beta",
| {"golden_diff": "diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py\n--- a/plugins/hydra_ax_sweeper/setup.py\n+++ b/plugins/hydra_ax_sweeper/setup.py\n@@ -18,7 +18,7 @@\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n", "issue": "[Ax-Plugin] Add support for Python 3.9\nPython 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676\r\n\r\nRelated to #1062 \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"ax-platform>=0.1.13\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"ax-platform>=0.1.13\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}]} | 661 | 151 |
gh_patches_debug_14560 | rasdani/github-patches | git_diff | python-trio__trio-1527 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
trio.open_signal_receiver should give an error if no arguments are passed
User confusion spotted in the wild: https://gitter.im/python-trio/general?at=5ebfa9d613878c30b581b9fe
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `trio/_signals.py`
Content:
```
1 import signal
2 from contextlib import contextmanager
3 from collections import OrderedDict
4
5 import trio
6 from ._util import signal_raise, is_main_thread, ConflictDetector
7
8 # Discussion of signal handling strategies:
9 #
10 # - On Windows signals barely exist. There are no options; signal handlers are
11 # the only available API.
12 #
13 # - On Linux signalfd is arguably the natural way. Semantics: signalfd acts as
14 # an *alternative* signal delivery mechanism. The way you use it is to mask
15 # out the relevant signals process-wide (so that they don't get delivered
16 # the normal way), and then when you read from signalfd that actually counts
17 # as delivering it (despite the mask). The problem with this is that we
18 # don't have any reliable way to mask out signals process-wide -- the only
19 # way to do that in Python is to call pthread_sigmask from the main thread
20 # *before starting any other threads*, and as a library we can't really
21 # impose that, and the failure mode is annoying (signals get delivered via
22 # signal handlers whether we want them to or not).
23 #
24 # - on macOS/*BSD, kqueue is the natural way. Semantics: kqueue acts as an
25 # *extra* signal delivery mechanism. Signals are delivered the normal
26 # way, *and* are delivered to kqueue. So you want to set them to SIG_IGN so
27 # that they don't end up pending forever (I guess?). I can't find any actual
28 # docs on how masking and EVFILT_SIGNAL interact. I did see someone note
29 # that if a signal is pending when the kqueue filter is added then you
30 # *don't* get notified of that, which makes sense. But still, we have to
31 # manipulate signal state (e.g. setting SIG_IGN) which as far as Python is
32 # concerned means we have to do this from the main thread.
33 #
34 # So in summary, there don't seem to be any compelling advantages to using the
35 # platform-native signal notification systems; they're kinda nice, but it's
36 # simpler to implement the naive signal-handler-based system once and be
37 # done. (The big advantage would be if there were a reliable way to monitor
38 # for SIGCHLD from outside the main thread and without interfering with other
39 # libraries that also want to monitor for SIGCHLD. But there isn't. I guess
40 # kqueue might give us that, but in kqueue we don't need it, because kqueue
41 # can directly monitor for child process state changes.)
42
43
44 @contextmanager
45 def _signal_handler(signals, handler):
46 original_handlers = {}
47 try:
48 for signum in set(signals):
49 original_handlers[signum] = signal.signal(signum, handler)
50 yield
51 finally:
52 for signum, original_handler in original_handlers.items():
53 signal.signal(signum, original_handler)
54
55
56 class SignalReceiver:
57 def __init__(self):
58 # {signal num: None}
59 self._pending = OrderedDict()
60 self._lot = trio.lowlevel.ParkingLot()
61 self._conflict_detector = ConflictDetector(
62 "only one task can iterate on a signal receiver at a time"
63 )
64 self._closed = False
65
66 def _add(self, signum):
67 if self._closed:
68 signal_raise(signum)
69 else:
70 self._pending[signum] = None
71 self._lot.unpark()
72
73 def _redeliver_remaining(self):
74 # First make sure that any signals still in the delivery pipeline will
75 # get redelivered
76 self._closed = True
77
78 # And then redeliver any that are sitting in pending. This is done
79 # using a weird recursive construct to make sure we process everything
80 # even if some of the handlers raise exceptions.
81 def deliver_next():
82 if self._pending:
83 signum, _ = self._pending.popitem(last=False)
84 try:
85 signal_raise(signum)
86 finally:
87 deliver_next()
88
89 deliver_next()
90
91 # Helper for tests, not public or otherwise used
92 def _pending_signal_count(self):
93 return len(self._pending)
94
95 def __aiter__(self):
96 return self
97
98 async def __anext__(self):
99 if self._closed:
100 raise RuntimeError("open_signal_receiver block already exited")
101 # In principle it would be possible to support multiple concurrent
102 # calls to __anext__, but doing it without race conditions is quite
103 # tricky, and there doesn't seem to be any point in trying.
104 with self._conflict_detector:
105 if not self._pending:
106 await self._lot.park()
107 else:
108 await trio.lowlevel.checkpoint()
109 signum, _ = self._pending.popitem(last=False)
110 return signum
111
112
113 @contextmanager
114 def open_signal_receiver(*signals):
115 """A context manager for catching signals.
116
117 Entering this context manager starts listening for the given signals and
118 returns an async iterator; exiting the context manager stops listening.
119
120 The async iterator blocks until a signal arrives, and then yields it.
121
122 Note that if you leave the ``with`` block while the iterator has
123 unextracted signals still pending inside it, then they will be
124 re-delivered using Python's regular signal handling logic. This avoids a
125 race condition when signals arrives just before we exit the ``with``
126 block.
127
128 Args:
129 signals: the signals to listen for.
130
131 Raises:
132 RuntimeError: if you try to use this anywhere except Python's main
133 thread. (This is a Python limitation.)
134
135 Example:
136
137 A common convention for Unix daemons is that they should reload their
138 configuration when they receive a ``SIGHUP``. Here's a sketch of what
139 that might look like using :func:`open_signal_receiver`::
140
141 with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter:
142 async for signum in signal_aiter:
143 assert signum == signal.SIGHUP
144 reload_configuration()
145
146 """
147 if not is_main_thread():
148 raise RuntimeError(
149 "Sorry, open_signal_receiver is only possible when running in "
150 "Python interpreter's main thread"
151 )
152 token = trio.lowlevel.current_trio_token()
153 queue = SignalReceiver()
154
155 def handler(signum, _):
156 token.run_sync_soon(queue._add, signum, idempotent=True)
157
158 try:
159 with _signal_handler(signals, handler):
160 yield queue
161 finally:
162 queue._redeliver_remaining()
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/trio/_signals.py b/trio/_signals.py
--- a/trio/_signals.py
+++ b/trio/_signals.py
@@ -129,6 +129,8 @@
signals: the signals to listen for.
Raises:
+ TypeError: if no signals were provided.
+
RuntimeError: if you try to use this anywhere except Python's main
thread. (This is a Python limitation.)
@@ -144,6 +146,9 @@
reload_configuration()
"""
+ if not signals:
+ raise TypeError("No signals were provided")
+
if not is_main_thread():
raise RuntimeError(
"Sorry, open_signal_receiver is only possible when running in "
| {"golden_diff": "diff --git a/trio/_signals.py b/trio/_signals.py\n--- a/trio/_signals.py\n+++ b/trio/_signals.py\n@@ -129,6 +129,8 @@\n signals: the signals to listen for.\n \n Raises:\n+ TypeError: if no signals were provided.\n+\n RuntimeError: if you try to use this anywhere except Python's main\n thread. (This is a Python limitation.)\n \n@@ -144,6 +146,9 @@\n reload_configuration()\n \n \"\"\"\n+ if not signals:\n+ raise TypeError(\"No signals were provided\")\n+\n if not is_main_thread():\n raise RuntimeError(\n \"Sorry, open_signal_receiver is only possible when running in \"\n", "issue": "trio.open_signal_receiver should give an error if no arguments are passed\nUser confusion spotted in the wild: https://gitter.im/python-trio/general?at=5ebfa9d613878c30b581b9fe\r\n\n", "before_files": [{"content": "import signal\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\n\nimport trio\nfrom ._util import signal_raise, is_main_thread, ConflictDetector\n\n# Discussion of signal handling strategies:\n#\n# - On Windows signals barely exist. There are no options; signal handlers are\n# the only available API.\n#\n# - On Linux signalfd is arguably the natural way. Semantics: signalfd acts as\n# an *alternative* signal delivery mechanism. The way you use it is to mask\n# out the relevant signals process-wide (so that they don't get delivered\n# the normal way), and then when you read from signalfd that actually counts\n# as delivering it (despite the mask). The problem with this is that we\n# don't have any reliable way to mask out signals process-wide -- the only\n# way to do that in Python is to call pthread_sigmask from the main thread\n# *before starting any other threads*, and as a library we can't really\n# impose that, and the failure mode is annoying (signals get delivered via\n# signal handlers whether we want them to or not).\n#\n# - on macOS/*BSD, kqueue is the natural way. Semantics: kqueue acts as an\n# *extra* signal delivery mechanism. Signals are delivered the normal\n# way, *and* are delivered to kqueue. So you want to set them to SIG_IGN so\n# that they don't end up pending forever (I guess?). I can't find any actual\n# docs on how masking and EVFILT_SIGNAL interact. I did see someone note\n# that if a signal is pending when the kqueue filter is added then you\n# *don't* get notified of that, which makes sense. But still, we have to\n# manipulate signal state (e.g. setting SIG_IGN) which as far as Python is\n# concerned means we have to do this from the main thread.\n#\n# So in summary, there don't seem to be any compelling advantages to using the\n# platform-native signal notification systems; they're kinda nice, but it's\n# simpler to implement the naive signal-handler-based system once and be\n# done. (The big advantage would be if there were a reliable way to monitor\n# for SIGCHLD from outside the main thread and without interfering with other\n# libraries that also want to monitor for SIGCHLD. But there isn't. 
I guess\n# kqueue might give us that, but in kqueue we don't need it, because kqueue\n# can directly monitor for child process state changes.)\n\n\n@contextmanager\ndef _signal_handler(signals, handler):\n original_handlers = {}\n try:\n for signum in set(signals):\n original_handlers[signum] = signal.signal(signum, handler)\n yield\n finally:\n for signum, original_handler in original_handlers.items():\n signal.signal(signum, original_handler)\n\n\nclass SignalReceiver:\n def __init__(self):\n # {signal num: None}\n self._pending = OrderedDict()\n self._lot = trio.lowlevel.ParkingLot()\n self._conflict_detector = ConflictDetector(\n \"only one task can iterate on a signal receiver at a time\"\n )\n self._closed = False\n\n def _add(self, signum):\n if self._closed:\n signal_raise(signum)\n else:\n self._pending[signum] = None\n self._lot.unpark()\n\n def _redeliver_remaining(self):\n # First make sure that any signals still in the delivery pipeline will\n # get redelivered\n self._closed = True\n\n # And then redeliver any that are sitting in pending. This is done\n # using a weird recursive construct to make sure we process everything\n # even if some of the handlers raise exceptions.\n def deliver_next():\n if self._pending:\n signum, _ = self._pending.popitem(last=False)\n try:\n signal_raise(signum)\n finally:\n deliver_next()\n\n deliver_next()\n\n # Helper for tests, not public or otherwise used\n def _pending_signal_count(self):\n return len(self._pending)\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n if self._closed:\n raise RuntimeError(\"open_signal_receiver block already exited\")\n # In principle it would be possible to support multiple concurrent\n # calls to __anext__, but doing it without race conditions is quite\n # tricky, and there doesn't seem to be any point in trying.\n with self._conflict_detector:\n if not self._pending:\n await self._lot.park()\n else:\n await trio.lowlevel.checkpoint()\n signum, _ = self._pending.popitem(last=False)\n return signum\n\n\n@contextmanager\ndef open_signal_receiver(*signals):\n \"\"\"A context manager for catching signals.\n\n Entering this context manager starts listening for the given signals and\n returns an async iterator; exiting the context manager stops listening.\n\n The async iterator blocks until a signal arrives, and then yields it.\n\n Note that if you leave the ``with`` block while the iterator has\n unextracted signals still pending inside it, then they will be\n re-delivered using Python's regular signal handling logic. This avoids a\n race condition when signals arrives just before we exit the ``with``\n block.\n\n Args:\n signals: the signals to listen for.\n\n Raises:\n RuntimeError: if you try to use this anywhere except Python's main\n thread. (This is a Python limitation.)\n\n Example:\n\n A common convention for Unix daemons is that they should reload their\n configuration when they receive a ``SIGHUP``. 
Here's a sketch of what\n that might look like using :func:`open_signal_receiver`::\n\n with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter:\n async for signum in signal_aiter:\n assert signum == signal.SIGHUP\n reload_configuration()\n\n \"\"\"\n if not is_main_thread():\n raise RuntimeError(\n \"Sorry, open_signal_receiver is only possible when running in \"\n \"Python interpreter's main thread\"\n )\n token = trio.lowlevel.current_trio_token()\n queue = SignalReceiver()\n\n def handler(signum, _):\n token.run_sync_soon(queue._add, signum, idempotent=True)\n\n try:\n with _signal_handler(signals, handler):\n yield queue\n finally:\n queue._redeliver_remaining()\n", "path": "trio/_signals.py"}], "after_files": [{"content": "import signal\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\n\nimport trio\nfrom ._util import signal_raise, is_main_thread, ConflictDetector\n\n# Discussion of signal handling strategies:\n#\n# - On Windows signals barely exist. There are no options; signal handlers are\n# the only available API.\n#\n# - On Linux signalfd is arguably the natural way. Semantics: signalfd acts as\n# an *alternative* signal delivery mechanism. The way you use it is to mask\n# out the relevant signals process-wide (so that they don't get delivered\n# the normal way), and then when you read from signalfd that actually counts\n# as delivering it (despite the mask). The problem with this is that we\n# don't have any reliable way to mask out signals process-wide -- the only\n# way to do that in Python is to call pthread_sigmask from the main thread\n# *before starting any other threads*, and as a library we can't really\n# impose that, and the failure mode is annoying (signals get delivered via\n# signal handlers whether we want them to or not).\n#\n# - on macOS/*BSD, kqueue is the natural way. Semantics: kqueue acts as an\n# *extra* signal delivery mechanism. Signals are delivered the normal\n# way, *and* are delivered to kqueue. So you want to set them to SIG_IGN so\n# that they don't end up pending forever (I guess?). I can't find any actual\n# docs on how masking and EVFILT_SIGNAL interact. I did see someone note\n# that if a signal is pending when the kqueue filter is added then you\n# *don't* get notified of that, which makes sense. But still, we have to\n# manipulate signal state (e.g. setting SIG_IGN) which as far as Python is\n# concerned means we have to do this from the main thread.\n#\n# So in summary, there don't seem to be any compelling advantages to using the\n# platform-native signal notification systems; they're kinda nice, but it's\n# simpler to implement the naive signal-handler-based system once and be\n# done. (The big advantage would be if there were a reliable way to monitor\n# for SIGCHLD from outside the main thread and without interfering with other\n# libraries that also want to monitor for SIGCHLD. But there isn't. 
I guess\n# kqueue might give us that, but in kqueue we don't need it, because kqueue\n# can directly monitor for child process state changes.)\n\n\n@contextmanager\ndef _signal_handler(signals, handler):\n original_handlers = {}\n try:\n for signum in set(signals):\n original_handlers[signum] = signal.signal(signum, handler)\n yield\n finally:\n for signum, original_handler in original_handlers.items():\n signal.signal(signum, original_handler)\n\n\nclass SignalReceiver:\n def __init__(self):\n # {signal num: None}\n self._pending = OrderedDict()\n self._lot = trio.lowlevel.ParkingLot()\n self._conflict_detector = ConflictDetector(\n \"only one task can iterate on a signal receiver at a time\"\n )\n self._closed = False\n\n def _add(self, signum):\n if self._closed:\n signal_raise(signum)\n else:\n self._pending[signum] = None\n self._lot.unpark()\n\n def _redeliver_remaining(self):\n # First make sure that any signals still in the delivery pipeline will\n # get redelivered\n self._closed = True\n\n # And then redeliver any that are sitting in pending. This is done\n # using a weird recursive construct to make sure we process everything\n # even if some of the handlers raise exceptions.\n def deliver_next():\n if self._pending:\n signum, _ = self._pending.popitem(last=False)\n try:\n signal_raise(signum)\n finally:\n deliver_next()\n\n deliver_next()\n\n # Helper for tests, not public or otherwise used\n def _pending_signal_count(self):\n return len(self._pending)\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n if self._closed:\n raise RuntimeError(\"open_signal_receiver block already exited\")\n # In principle it would be possible to support multiple concurrent\n # calls to __anext__, but doing it without race conditions is quite\n # tricky, and there doesn't seem to be any point in trying.\n with self._conflict_detector:\n if not self._pending:\n await self._lot.park()\n else:\n await trio.lowlevel.checkpoint()\n signum, _ = self._pending.popitem(last=False)\n return signum\n\n\n@contextmanager\ndef open_signal_receiver(*signals):\n \"\"\"A context manager for catching signals.\n\n Entering this context manager starts listening for the given signals and\n returns an async iterator; exiting the context manager stops listening.\n\n The async iterator blocks until a signal arrives, and then yields it.\n\n Note that if you leave the ``with`` block while the iterator has\n unextracted signals still pending inside it, then they will be\n re-delivered using Python's regular signal handling logic. This avoids a\n race condition when signals arrives just before we exit the ``with``\n block.\n\n Args:\n signals: the signals to listen for.\n\n Raises:\n TypeError: if no signals were provided.\n\n RuntimeError: if you try to use this anywhere except Python's main\n thread. (This is a Python limitation.)\n\n Example:\n\n A common convention for Unix daemons is that they should reload their\n configuration when they receive a ``SIGHUP``. 
Here's a sketch of what\n that might look like using :func:`open_signal_receiver`::\n\n with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter:\n async for signum in signal_aiter:\n assert signum == signal.SIGHUP\n reload_configuration()\n\n \"\"\"\n if not signals:\n raise TypeError(\"No signals were provided\")\n\n if not is_main_thread():\n raise RuntimeError(\n \"Sorry, open_signal_receiver is only possible when running in \"\n \"Python interpreter's main thread\"\n )\n token = trio.lowlevel.current_trio_token()\n queue = SignalReceiver()\n\n def handler(signum, _):\n token.run_sync_soon(queue._add, signum, idempotent=True)\n\n try:\n with _signal_handler(signals, handler):\n yield queue\n finally:\n queue._redeliver_remaining()\n", "path": "trio/_signals.py"}]} | 2,125 | 159 |
gh_patches_debug_30561 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Edit Organization: url is not displayed
When editing an organization which has a url, the url is not displayed in the field
Dataset Creation [BUG]: Can not save multiple countries
1. Dataset creation: When adding multiple countries, it is saving only one
2. Dataset edit: When adding multiple countries, it is saving only one
3. Dataset creation, steps:
a. add one country
b. click on next:add data (no filling other fields)
c. it will reset the country selection (not ok!)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/plugin.py`
Content:
```
1 import ckanext.hdx_package.helpers.licenses as hdx_licenses
2
3 import ckan.plugins as plugins
4 import ckan.plugins.toolkit as toolkit
5 import ckan.model.package as package
6 import ckan.model.license as license
7 import pylons.config as config
8 import version
9
10 import ckanext.hdx_package.helpers.caching as caching
11 import ckanext.hdx_theme.helpers.auth as auth
12
13
14 # def run_on_startup():
15 # cache_on_startup = config.get('hdx.cache.onstartup', 'true')
16 # if 'true' == cache_on_startup:
17 # _generate_license_list()
18 # caching.cached_get_group_package_stuff()
19
20
21 # def _generate_license_list():
22 # package.Package._license_register = license.LicenseRegister()
23 # package.Package._license_register.licenses = [
24 # license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
25 # license.License(license.LicenseCreativeCommonsAttribution()),
26 # license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
27 # license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
28 # license.License(hdx_licenses.LicenseHdxMultiple()),
29 # license.License(hdx_licenses.LicenseHdxOther())
30 # ]
31
32 class HDXThemePlugin(plugins.SingletonPlugin):
33 plugins.implements(plugins.IConfigurer)
34 plugins.implements(plugins.IRoutes, inherit=True)
35 plugins.implements(plugins.ITemplateHelpers)
36 plugins.implements(plugins.IActions)
37 plugins.implements(plugins.IAuthFunctions)
38 plugins.implements(plugins.IGroupController, inherit=True)
39 plugins.implements(plugins.IMiddleware, inherit=True)
40
41 def update_config(self, config):
42 toolkit.add_template_directory(config, 'templates')
43 toolkit.add_template_directory(config, 'templates_legacy')
44 toolkit.add_public_directory(config, 'public')
45 toolkit.add_resource('fanstatic', 'hdx_theme')
46
47
48 def before_map(self, map):
49 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
50 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
51 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
52 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
53 #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
54 #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
55
56 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
57 map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
58
59 #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
60
61 return map
62
63 def create(self, entity):
64 caching.invalidate_group_caches()
65
66 def edit(self, entity):
67 caching.invalidate_group_caches()
68
69 def get_helpers(self):
70 from ckanext.hdx_theme.helpers import helpers as hdx_helpers
71 return {
72 'is_downloadable': hdx_helpers.is_downloadable,
73 'get_facet_items_dict': hdx_helpers.get_facet_items_dict,
74 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
75 'get_filtered_params_list': hdx_helpers.get_filtered_params_list,
76 'get_last_revision_package': hdx_helpers.get_last_revision_package,
77 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
78 'get_last_revision_group': hdx_helpers.get_last_revision_group,
79 'get_group_followers': hdx_helpers.get_group_followers,
80 'get_group_members': hdx_helpers.get_group_members,
81 'markdown_extract_strip': hdx_helpers.markdown_extract_strip,
82 'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,
83 'hdx_version': hdx_helpers.hdx_version,
84 'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,
85 'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,
86 'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,
87 'hdx_get_user_info': hdx_helpers.hdx_get_user_info,
88 'hdx_linked_user': hdx_helpers.hdx_linked_user,
89 'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,
90 'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,
91 'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,
92 'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,
93 'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,
94 'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config
95 }
96
97 def get_actions(self):
98 from ckanext.hdx_theme.helpers import actions as hdx_actions
99 return {
100 'organization_list_for_user':hdx_actions.organization_list_for_user,
101 'cached_group_list': hdx_actions.cached_group_list,
102 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,
103 'member_list': hdx_actions.member_list,
104 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,
105 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,
106 'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,
107 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,
108 'hdx_user_show' : hdx_actions.hdx_user_show
109
110 }
111 def get_auth_functions(self):
112 return {
113 'hdx_basic_user_info': auth.hdx_basic_user_info,
114 'group_member_create': auth.group_member_create,
115 'hdx_send_new_org_request': auth.hdx_send_new_org_request,
116 'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,
117 'hdx_send_request_membership': auth.hdx_send_request_membership
118 }
119
120 # def make_middleware(self, app, config):
121 # run_on_startup()
122 # return app
123
124
125
126
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -47,13 +47,13 @@
def before_map(self, map):
map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
- map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
- map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
- map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
+ map.connect('/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')
+ map.connect('/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')
+ map.connect('/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')
#map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
#map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
- map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
+ map.connect('/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')
map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
#map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -47,13 +47,13 @@\n \n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n- map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n- map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n- map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n+ map.connect('/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')\n+ map.connect('/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')\n+ map.connect('/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')\n #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n- map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n+ map.connect('/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n \n #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n", "issue": "Edit Organization: url is not displayed\nWhen edit an organization which has an url, the url is not displayed in the field\n\nDataset Creation [BUG]: Can not save multiple countries \n1. Dataset creation: When adding multiple countries, it is saving only one\n2. Dataset edit: When adding multiple countries, it is saving only one\n3. Dataset creation, steps:\n a. add one country\n b. click on next:add data (no filling other fields)\n c. 
it will reset the country selection (not ok!)\n\n", "before_files": [{"content": "import ckanext.hdx_package.helpers.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_package.helpers.caching as caching\nimport ckanext.hdx_theme.helpers.auth as auth\n\n\n# def run_on_startup():\n# cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n# if 'true' == cache_on_startup:\n# _generate_license_list()\n# caching.cached_get_group_package_stuff()\n\n\n# def _generate_license_list():\n# package.Package._license_register = license.LicenseRegister() \n# package.Package._license_register.licenses = [\n# license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n# license.License(license.LicenseCreativeCommonsAttribution()),\n# license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n# license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n# license.License(hdx_licenses.LicenseHdxMultiple()),\n# license.License(hdx_licenses.LicenseHdxOther())\n# ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_template_directory(config, 'templates_legacy')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme.helpers import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict': hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list': hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package': hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 
'get_last_revision_group': hdx_helpers.get_last_revision_group,\n 'get_group_followers': hdx_helpers.get_group_followers,\n 'get_group_members': hdx_helpers.get_group_members,\n 'markdown_extract_strip': hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,\n 'hdx_version': hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info': hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user': hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,\n 'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,\n 'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,\n 'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config\n }\n \n def get_actions(self):\n from ckanext.hdx_theme.helpers import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,\n 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,\n 'hdx_user_show' : hdx_actions.hdx_user_show\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info,\n 'group_member_create': auth.group_member_create,\n 'hdx_send_new_org_request': auth.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': auth.hdx_send_request_membership\n }\n \n # def make_middleware(self, app, config):\n # run_on_startup()\n # return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}], "after_files": [{"content": "import ckanext.hdx_package.helpers.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_package.helpers.caching as caching\nimport ckanext.hdx_theme.helpers.auth as auth\n\n\n# def run_on_startup():\n# cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n# if 'true' == cache_on_startup:\n# _generate_license_list()\n# caching.cached_get_group_package_stuff()\n\n\n# def _generate_license_list():\n# package.Package._license_register = license.LicenseRegister() \n# package.Package._license_register.licenses = [\n# license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n# license.License(license.LicenseCreativeCommonsAttribution()),\n# license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n# license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n# license.License(hdx_licenses.LicenseHdxMultiple()),\n# license.License(hdx_licenses.LicenseHdxOther())\n# ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n 
plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_template_directory(config, 'templates_legacy')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')\n #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme.helpers import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict': hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list': hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package': hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group': hdx_helpers.get_last_revision_group,\n 'get_group_followers': hdx_helpers.get_group_followers,\n 'get_group_members': hdx_helpers.get_group_members,\n 'markdown_extract_strip': hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,\n 'hdx_version': hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info': hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user': hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,\n 'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,\n 'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,\n 'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config\n }\n \n def get_actions(self):\n from ckanext.hdx_theme.helpers import actions as hdx_actions\n return {\n 
'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,\n 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,\n 'hdx_user_show' : hdx_actions.hdx_user_show\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info,\n 'group_member_create': auth.group_member_create,\n 'hdx_send_new_org_request': auth.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': auth.hdx_send_request_membership\n }\n \n # def make_middleware(self, app, config):\n # run_on_startup()\n # return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]} | 2,083 | 463 |
gh_patches_debug_34844 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ENH] Naming conventions
I wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`.
Also, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there.
Similarly, when following the format of `limit_column_characters` functions like `change_type` should probably be named `change_column_type`.
I'm not married to any of this (except the function arguments, those should be consistent) but wanted to get people's ideas.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/io.py`
Content:
```
1 import os
2 from glob import glob
3
4 import pandas as pd
5
6
7 def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):
8 """
9 :param filespath: The filepath pattern matching the CSVs files.
10 Accepts regular expressions, with or without csv extension.
11 :param seperate_df: If False (default) returns a single Dataframe
12 with the concatenation of the csv files.
13 If True, returns a dictionary of seperate dataframes
14 for each CSV file.
15 :param kwargs: Keyword arguments to pass into the
16 original pandas `read_csv`.
17 """
18 # Sanitize input
19 assert filespath is not None
20 assert len(filespath) != 0
21
22 # Read the csv files
23 dfs = {
24 os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)
25 }
26 # Check if dataframes have been read
27 if len(dfs) == 0:
28 raise ValueError("No CSV files to read with the given filespath")
29 # Concatenate the dataframes if requested (default)
30 col_names = list(dfs.values())[0].columns
31 if not seperate_df:
32 # If columns do not match raise an error
33 for df in dfs.values():
34 if not all(df.columns == col_names):
35 raise ValueError(
36 "Columns in input CSV files do not match."
37 "Files cannot be concatenated"
38 )
39 return pd.concat(list(dfs.values()), ignore_index=True, sort=False)
40 else:
41 return dfs
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/io.py b/janitor/io.py
--- a/janitor/io.py
+++ b/janitor/io.py
@@ -1,34 +1,49 @@
import os
from glob import glob
+from typing import Iterable, Union
import pandas as pd
+from .utils import deprecated_alias
-def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):
+
+@deprecated_alias(seperate_df="separate_df", filespath="files_path")
+def read_csvs(
+ files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs
+) -> Union[pd.DataFrame, dict]:
"""
- :param filespath: The filepath pattern matching the CSVs files.
+ :param files_path: The filepath pattern matching the CSVs files.
Accepts regular expressions, with or without csv extension.
- :param seperate_df: If False (default) returns a single Dataframe
+ Also accepts iterable of file paths.
+ :param separate_df: If False (default) returns a single Dataframe
with the concatenation of the csv files.
- If True, returns a dictionary of seperate dataframes
+ If True, returns a dictionary of separate dataframes
for each CSV file.
:param kwargs: Keyword arguments to pass into the
original pandas `read_csv`.
"""
# Sanitize input
- assert filespath is not None
- assert len(filespath) != 0
+ assert files_path is not None
+ assert len(files_path) != 0
# Read the csv files
- dfs = {
- os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)
- }
+ # String to file/folder or file pattern provided
+ if isinstance(files_path, str):
+ dfs = {
+ os.path.basename(f): pd.read_csv(f, **kwargs)
+ for f in glob(files_path)
+ }
+ # Iterable of file paths provided
+ else:
+ dfs = {
+ os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path
+ }
# Check if dataframes have been read
if len(dfs) == 0:
- raise ValueError("No CSV files to read with the given filespath")
+ raise ValueError("No CSV files to read with the given `files_path`")
# Concatenate the dataframes if requested (default)
col_names = list(dfs.values())[0].columns
- if not seperate_df:
+ if not separate_df:
# If columns do not match raise an error
for df in dfs.values():
if not all(df.columns == col_names):
| {"golden_diff": "diff --git a/janitor/io.py b/janitor/io.py\n--- a/janitor/io.py\n+++ b/janitor/io.py\n@@ -1,34 +1,49 @@\n import os\n from glob import glob\n+from typing import Iterable, Union\n \n import pandas as pd\n \n+from .utils import deprecated_alias\n \n-def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n+\n+@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\n+def read_csvs(\n+ files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n+) -> Union[pd.DataFrame, dict]:\n \"\"\"\n- :param filespath: The filepath pattern matching the CSVs files.\n+ :param files_path: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n- :param seperate_df: If False (default) returns a single Dataframe\n+ Also accepts iterable of file paths.\n+ :param separate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n- If True, returns a dictionary of seperate dataframes\n+ If True, returns a dictionary of separate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n- assert filespath is not None\n- assert len(filespath) != 0\n+ assert files_path is not None\n+ assert len(files_path) != 0\n \n # Read the csv files\n- dfs = {\n- os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n- }\n+ # String to file/folder or file pattern provided\n+ if isinstance(files_path, str):\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs)\n+ for f in glob(files_path)\n+ }\n+ # Iterable of file paths provided\n+ else:\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n+ }\n # Check if dataframes have been read\n if len(dfs) == 0:\n- raise ValueError(\"No CSV files to read with the given filespath\")\n+ raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n- if not seperate_df:\n+ if not separate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n", "issue": "[ENH] Naming conventions\nI wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`.\r\n\r\nAlso, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? 
In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there.\r\n\r\nSimilarly, when following the format of `limit_column_characters` functions like `change_type` should probably be named `change_column_type`.\r\n\r\nI'm not married to any of this (except the function arguments, those should be consistent) but wanted to get peoples' ideas.\n", "before_files": [{"content": "import os\nfrom glob import glob\n\nimport pandas as pd\n\n\ndef read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n \"\"\"\n :param filespath: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n :param seperate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n If True, returns a dictionary of seperate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n assert filespath is not None\n assert len(filespath) != 0\n\n # Read the csv files\n dfs = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n }\n # Check if dataframes have been read\n if len(dfs) == 0:\n raise ValueError(\"No CSV files to read with the given filespath\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n if not seperate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(list(dfs.values()), ignore_index=True, sort=False)\n else:\n return dfs\n", "path": "janitor/io.py"}], "after_files": [{"content": "import os\nfrom glob import glob\nfrom typing import Iterable, Union\n\nimport pandas as pd\n\nfrom .utils import deprecated_alias\n\n\n@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\ndef read_csvs(\n files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n) -> Union[pd.DataFrame, dict]:\n \"\"\"\n :param files_path: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n Also accepts iterable of file paths.\n :param separate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n If True, returns a dictionary of separate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n assert files_path is not None\n assert len(files_path) != 0\n\n # Read the csv files\n # String to file/folder or file pattern provided\n if isinstance(files_path, str):\n dfs = {\n os.path.basename(f): pd.read_csv(f, **kwargs)\n for f in glob(files_path)\n }\n # Iterable of file paths provided\n else:\n dfs = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n }\n # Check if dataframes have been read\n if len(dfs) == 0:\n raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n if not separate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(list(dfs.values()), ignore_index=True, sort=False)\n else:\n return dfs\n", "path": 
"janitor/io.py"}]} | 813 | 601 |
gh_patches_debug_15610 | rasdani/github-patches | git_diff | celery__celery-3705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mongodb result backend with pickle serialisation broken in 4.0.0
Just upgraded to celery 4.0.0. Mongodb result backend with pickle no longer works with the exception at the end of this message.
This is because commit https://github.com/celery/celery/commit/639b40f6308267312a1030bb3d6ac5805069510a removes the **essential** Binary() wrapper from the data written to mongo. As a result it writes raw binary data to mongo as a string, which MongoDB then rejects.
I'm using settings:
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
Exception seen:
> [2016-11-10 16:01:02,118: ERROR/MainProcess] Pool callback raised exception: InvalidStringData("strings in documents must be valid UTF-8: '\\x80\\x02N.'",)
> Traceback (most recent call last):
> File "/usr/lib/python2.7/site-packages/billiard/pool.py", line 1748, in safe_apply_callback
> fun(*args, **kwargs)
> File "/usr/lib/python2.7/site-packages/celery/worker/request.py", line 366, in on_failure
> self.id, exc, request=self, store_result=self.store_errors,
> File "/usr/lib/python2.7/site-packages/celery/backends/base.py", line 163, in mark_as_failure
> traceback=traceback, request=request)
> File "/usr/lib/python2.7/site-packages/celery/backends/base.py", line 309, in store_result
> request=request, **kwargs)
> File "/usr/lib/python2.7/site-packages/celery/backends/mongodb.py", line 175, in _store_result
> self.collection.save(meta)
> File "/usr/lib/python2.7/site-packages/pymongo/collection.py", line 2192, in save
> check_keys, False, manipulate, write_concern)
> File "/usr/lib/python2.7/site-packages/pymongo/collection.py", line 715, in _update
> codec_options=self.__write_response_codec_options).copy()
> File "/usr/lib/python2.7/site-packages/pymongo/pool.py", line 244, in command
> self._raise_connection_failure(error)
> File "/usr/lib/python2.7/site-packages/pymongo/pool.py", line 372, in _raise_connection_failure
> raise error
> InvalidStringData: strings in documents must be valid UTF-8: '\x80\x02N.'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/backends/mongodb.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """MongoDB result store backend."""
3 from __future__ import absolute_import, unicode_literals
4 from datetime import datetime, timedelta
5 from kombu.utils.objects import cached_property
6 from kombu.utils.url import maybe_sanitize_url
7 from kombu.exceptions import EncodeError
8 from celery import states
9 from celery.exceptions import ImproperlyConfigured
10 from celery.five import string_t, items
11 from .base import BaseBackend
12
13 try:
14 import pymongo
15 except ImportError: # pragma: no cover
16 pymongo = None # noqa
17
18 if pymongo:
19 try:
20 from bson.binary import Binary
21 except ImportError: # pragma: no cover
22 from pymongo.binary import Binary # noqa
23 from pymongo.errors import InvalidDocument # noqa
24 else: # pragma: no cover
25 Binary = None # noqa
26
27 class InvalidDocument(Exception): # noqa
28 pass
29
30 __all__ = ['MongoBackend']
31
32
33 class MongoBackend(BaseBackend):
34 """MongoDB result backend.
35
36 Raises:
37 celery.exceptions.ImproperlyConfigured:
38 if module :pypi:`pymongo` is not available.
39 """
40
41 mongo_host = None
42 host = 'localhost'
43 port = 27017
44 user = None
45 password = None
46 database_name = 'celery'
47 taskmeta_collection = 'celery_taskmeta'
48 groupmeta_collection = 'celery_groupmeta'
49 max_pool_size = 10
50 options = None
51
52 supports_autoexpire = False
53
54 _connection = None
55
56 def __init__(self, app=None, **kwargs):
57 self.options = {}
58
59 super(MongoBackend, self).__init__(app, **kwargs)
60
61 if not pymongo:
62 raise ImproperlyConfigured(
63 'You need to install the pymongo library to use the '
64 'MongoDB backend.')
65
66 # Set option defaults
67 for key, value in items(self._prepare_client_options()):
68 self.options.setdefault(key, value)
69
70 # update conf with mongo uri data, only if uri was given
71 if self.url:
72 if self.url == 'mongodb://':
73 self.url += 'localhost'
74
75 uri_data = pymongo.uri_parser.parse_uri(self.url)
76 # build the hosts list to create a mongo connection
77 hostslist = [
78 '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
79 ]
80 self.user = uri_data['username']
81 self.password = uri_data['password']
82 self.mongo_host = hostslist
83 if uri_data['database']:
84 # if no database is provided in the uri, use default
85 self.database_name = uri_data['database']
86
87 self.options.update(uri_data['options'])
88
89 # update conf with specific settings
90 config = self.app.conf.get('mongodb_backend_settings')
91 if config is not None:
92 if not isinstance(config, dict):
93 raise ImproperlyConfigured(
94 'MongoDB backend settings should be grouped in a dict')
95 config = dict(config) # don't modify original
96
97 if 'host' in config or 'port' in config:
98 # these should take over uri conf
99 self.mongo_host = None
100
101 self.host = config.pop('host', self.host)
102 self.port = config.pop('port', self.port)
103 self.mongo_host = config.pop('mongo_host', self.mongo_host)
104 self.user = config.pop('user', self.user)
105 self.password = config.pop('password', self.password)
106 self.database_name = config.pop('database', self.database_name)
107 self.taskmeta_collection = config.pop(
108 'taskmeta_collection', self.taskmeta_collection,
109 )
110 self.groupmeta_collection = config.pop(
111 'groupmeta_collection', self.groupmeta_collection,
112 )
113
114 self.options.update(config.pop('options', {}))
115 self.options.update(config)
116
117 def _prepare_client_options(self):
118 if pymongo.version_tuple >= (3,):
119 return {'maxPoolSize': self.max_pool_size}
120 else: # pragma: no cover
121 return {'max_pool_size': self.max_pool_size,
122 'auto_start_request': False}
123
124 def _get_connection(self):
125 """Connect to the MongoDB server."""
126 if self._connection is None:
127 from pymongo import MongoClient
128
129 host = self.mongo_host
130 if not host:
131 # The first pymongo.Connection() argument (host) can be
132 # a list of ['host:port'] elements or a mongodb connection
133 # URI. If this is the case, don't use self.port
134 # but let pymongo get the port(s) from the URI instead.
135 # This enables the use of replica sets and sharding.
136 # See pymongo.Connection() for more info.
137 host = self.host
138 if isinstance(host, string_t) \
139 and not host.startswith('mongodb://'):
140 host = 'mongodb://{0}:{1}'.format(host, self.port)
141 # don't change self.options
142 conf = dict(self.options)
143 conf['host'] = host
144
145 self._connection = MongoClient(**conf)
146
147 return self._connection
148
149 def encode(self, data):
150 if self.serializer == 'bson':
151 # mongodb handles serialization
152 return data
153 return super(MongoBackend, self).encode(data)
154
155 def decode(self, data):
156 if self.serializer == 'bson':
157 return data
158 return super(MongoBackend, self).decode(data)
159
160 def _store_result(self, task_id, result, state,
161 traceback=None, request=None, **kwargs):
162 """Store return value and state of an executed task."""
163 meta = {
164 '_id': task_id,
165 'status': state,
166 'result': self.encode(result),
167 'date_done': datetime.utcnow(),
168 'traceback': self.encode(traceback),
169 'children': self.encode(
170 self.current_task_children(request),
171 ),
172 }
173
174 try:
175 self.collection.save(meta)
176 except InvalidDocument as exc:
177 raise EncodeError(exc)
178
179 return result
180
181 def _get_task_meta_for(self, task_id):
182 """Get task meta-data for a task by id."""
183 obj = self.collection.find_one({'_id': task_id})
184 if obj:
185 return self.meta_from_decoded({
186 'task_id': obj['_id'],
187 'status': obj['status'],
188 'result': self.decode(obj['result']),
189 'date_done': obj['date_done'],
190 'traceback': self.decode(obj['traceback']),
191 'children': self.decode(obj['children']),
192 })
193 return {'status': states.PENDING, 'result': None}
194
195 def _save_group(self, group_id, result):
196 """Save the group result."""
197 self.group_collection.save({
198 '_id': group_id,
199 'result': self.encode([i.id for i in result]),
200 'date_done': datetime.utcnow(),
201 })
202 return result
203
204 def _restore_group(self, group_id):
205 """Get the result for a group by id."""
206 obj = self.group_collection.find_one({'_id': group_id})
207 if obj:
208 return {
209 'task_id': obj['_id'],
210 'date_done': obj['date_done'],
211 'result': [
212 self.app.AsyncResult(task)
213 for task in self.decode(obj['result'])
214 ],
215 }
216
217 def _delete_group(self, group_id):
218 """Delete a group by id."""
219 self.group_collection.remove({'_id': group_id})
220
221 def _forget(self, task_id):
222 """Remove result from MongoDB.
223
224 Raises:
225 pymongo.exceptions.OperationsError:
226 if the task_id could not be removed.
227 """
228 # By using safe=True, this will wait until it receives a response from
229 # the server. Likewise, it will raise an OperationsError if the
230 # response was unable to be completed.
231 self.collection.remove({'_id': task_id})
232
233 def cleanup(self):
234 """Delete expired meta-data."""
235 self.collection.remove(
236 {'date_done': {'$lt': self.app.now() - self.expires_delta}},
237 )
238 self.group_collection.remove(
239 {'date_done': {'$lt': self.app.now() - self.expires_delta}},
240 )
241
242 def __reduce__(self, args=(), kwargs={}):
243 return super(MongoBackend, self).__reduce__(
244 args, dict(kwargs, expires=self.expires, url=self.url))
245
246 def _get_database(self):
247 conn = self._get_connection()
248 db = conn[self.database_name]
249 if self.user and self.password:
250 if not db.authenticate(self.user, self.password):
251 raise ImproperlyConfigured(
252 'Invalid MongoDB username or password.')
253 return db
254
255 @cached_property
256 def database(self):
257 """Get database from MongoDB connection.
258
259 performs authentication if necessary.
260 """
261 return self._get_database()
262
263 @cached_property
264 def collection(self):
265 """Get the meta-data task collection."""
266 collection = self.database[self.taskmeta_collection]
267
268 # Ensure an index on date_done is there, if not process the index
269 # in the background. Once completed cleanup will be much faster
270 collection.ensure_index('date_done', background='true')
271 return collection
272
273 @cached_property
274 def group_collection(self):
275 """Get the meta-data task collection."""
276 collection = self.database[self.groupmeta_collection]
277
278 # Ensure an index on date_done is there, if not process the index
279 # in the background. Once completed cleanup will be much faster
280 collection.ensure_index('date_done', background='true')
281 return collection
282
283 @cached_property
284 def expires_delta(self):
285 return timedelta(seconds=self.expires)
286
287 def as_uri(self, include_password=False):
288 """Return the backend as an URI.
289
290 Arguments:
291 include_password (bool): Password censored if disabled.
292 """
293 if not self.url:
294 return 'mongodb://'
295 if include_password:
296 return self.url
297
298 if ',' not in self.url:
299 return maybe_sanitize_url(self.url)
300
301 uri1, remainder = self.url.split(',', 1)
302 return ','.join([maybe_sanitize_url(uri1), remainder])
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -29,6 +29,8 @@
__all__ = ['MongoBackend']
+BINARY_CODECS = frozenset(['pickle', 'msgpack'])
+
class MongoBackend(BaseBackend):
"""MongoDB result backend.
@@ -150,7 +152,12 @@
if self.serializer == 'bson':
# mongodb handles serialization
return data
- return super(MongoBackend, self).encode(data)
+ payload = super(MongoBackend, self).encode(data)
+
+ # serializer which are in a unsupported format (pickle/binary)
+ if self.serializer in BINARY_CODECS:
+ payload = Binary(payload)
+ return payload
def decode(self, data):
if self.serializer == 'bson':
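The change above wraps pickle/msgpack payloads in `bson.binary.Binary` before handing them to pymongo, restoring the behaviour the issue says was lost. A small standalone check (assuming `pymongo`, which bundles `bson`, is installed) shows why the raw pickle bytes from the traceback fail MongoDB's UTF-8 string validation and what the wrapper changes:
```python
import pickle

from bson.binary import Binary  # bson is distributed with pymongo

payload = pickle.dumps(None, protocol=2)
print(payload)          # b'\x80\x02N.' -> not valid UTF-8, rejected if stored as a plain str field
print(Binary(payload))  # e.g. Binary(b'\x80\x02N.', 0) -> stored as BSON binary data instead
```
Only `pickle` and `msgpack` need the wrapper; text serializers such as JSON already yield valid strings, and the `bson` serializer bypasses encoding entirely, which is why the patch gates the wrapping on `BINARY_CODECS`.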
| {"golden_diff": "diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py\n--- a/celery/backends/mongodb.py\n+++ b/celery/backends/mongodb.py\n@@ -29,6 +29,8 @@\n \n __all__ = ['MongoBackend']\n \n+BINARY_CODECS = frozenset(['pickle', 'msgpack'])\n+\n \n class MongoBackend(BaseBackend):\n \"\"\"MongoDB result backend.\n@@ -150,7 +152,12 @@\n if self.serializer == 'bson':\n # mongodb handles serialization\n return data\n- return super(MongoBackend, self).encode(data)\n+ payload = super(MongoBackend, self).encode(data)\n+\n+ # serializer which are in a unsupported format (pickle/binary)\n+ if self.serializer in BINARY_CODECS:\n+ payload = Binary(payload)\n+ return payload\n \n def decode(self, data):\n if self.serializer == 'bson':\n", "issue": "mongodb result backend with pickle serialisation broken in 4.0.0\nJust upgraded to celery 4.0.0. Mongodb result backend with pickle no longer works with the exception at the end of this message. \r\n\r\nThis is because commit https://github.com/celery/celery/commit/639b40f6308267312a1030bb3d6ac5805069510a removes the **essential** Binary() wrapper from the data written to mongo. As a result it writes raw binary data to mongo as a string, which then rejects it.\r\n\r\nI'm using settings:\r\nCELERY_ACCEPT_CONTENT = ['pickle']\r\nCELERY_TASK_SERIALIZER = 'pickle'\r\nCELERY_RESULT_SERIALIZER = 'pickle'\r\n\r\nException seen:\r\n> [2016-11-10 16:01:02,118: ERROR/MainProcess] Pool callback raised exception: InvalidStringData(\"strings in documents must be valid UTF-8: '\\\\x80\\\\x02N.'\",)\r\n> Traceback (most recent call last):\r\n> File \"/usr/lib/python2.7/site-packages/billiard/pool.py\", line 1748, in safe_apply_callback\r\n> fun(*args, **kwargs)\r\n> File \"/usr/lib/python2.7/site-packages/celery/worker/request.py\", line 366, in on_failure\r\n> self.id, exc, request=self, store_result=self.store_errors,\r\n> File \"/usr/lib/python2.7/site-packages/celery/backends/base.py\", line 163, in mark_as_failure\r\n> traceback=traceback, request=request)\r\n> File \"/usr/lib/python2.7/site-packages/celery/backends/base.py\", line 309, in store_result\r\n> request=request, **kwargs)\r\n> File \"/usr/lib/python2.7/site-packages/celery/backends/mongodb.py\", line 175, in _store_result\r\n> self.collection.save(meta)\r\n> File \"/usr/lib/python2.7/site-packages/pymongo/collection.py\", line 2192, in save\r\n> check_keys, False, manipulate, write_concern)\r\n> File \"/usr/lib/python2.7/site-packages/pymongo/collection.py\", line 715, in _update\r\n> codec_options=self.__write_response_codec_options).copy()\r\n> File \"/usr/lib/python2.7/site-packages/pymongo/pool.py\", line 244, in command\r\n> self._raise_connection_failure(error)\r\n> File \"/usr/lib/python2.7/site-packages/pymongo/pool.py\", line 372, in _raise_connection_failure\r\n> raise error\r\n> InvalidStringData: strings in documents must be valid UTF-8: '\\x80\\x02N.'\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"MongoDB result store backend.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom datetime import datetime, timedelta\nfrom kombu.utils.objects import cached_property\nfrom kombu.utils.url import maybe_sanitize_url\nfrom kombu.exceptions import EncodeError\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import string_t, items\nfrom .base import BaseBackend\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import 
Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\n from pymongo.errors import InvalidDocument # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\n class InvalidDocument(Exception): # noqa\n pass\n\n__all__ = ['MongoBackend']\n\n\nclass MongoBackend(BaseBackend):\n \"\"\"MongoDB result backend.\n\n Raises:\n celery.exceptions.ImproperlyConfigured:\n if module :pypi:`pymongo` is not available.\n \"\"\"\n\n mongo_host = None\n host = 'localhost'\n port = 27017\n user = None\n password = None\n database_name = 'celery'\n taskmeta_collection = 'celery_taskmeta'\n groupmeta_collection = 'celery_groupmeta'\n max_pool_size = 10\n options = None\n\n supports_autoexpire = False\n\n _connection = None\n\n def __init__(self, app=None, **kwargs):\n self.options = {}\n\n super(MongoBackend, self).__init__(app, **kwargs)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n # Set option defaults\n for key, value in items(self._prepare_client_options()):\n self.options.setdefault(key, value)\n\n # update conf with mongo uri data, only if uri was given\n if self.url:\n if self.url == 'mongodb://':\n self.url += 'localhost'\n\n uri_data = pymongo.uri_parser.parse_uri(self.url)\n # build the hosts list to create a mongo connection\n hostslist = [\n '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']\n ]\n self.user = uri_data['username']\n self.password = uri_data['password']\n self.mongo_host = hostslist\n if uri_data['database']:\n # if no database is provided in the uri, use default\n self.database_name = uri_data['database']\n\n self.options.update(uri_data['options'])\n\n # update conf with specific settings\n config = self.app.conf.get('mongodb_backend_settings')\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n config = dict(config) # don't modify original\n\n if 'host' in config or 'port' in config:\n # these should take over uri conf\n self.mongo_host = None\n\n self.host = config.pop('host', self.host)\n self.port = config.pop('port', self.port)\n self.mongo_host = config.pop('mongo_host', self.mongo_host)\n self.user = config.pop('user', self.user)\n self.password = config.pop('password', self.password)\n self.database_name = config.pop('database', self.database_name)\n self.taskmeta_collection = config.pop(\n 'taskmeta_collection', self.taskmeta_collection,\n )\n self.groupmeta_collection = config.pop(\n 'groupmeta_collection', self.groupmeta_collection,\n )\n\n self.options.update(config.pop('options', {}))\n self.options.update(config)\n\n def _prepare_client_options(self):\n if pymongo.version_tuple >= (3,):\n return {'maxPoolSize': self.max_pool_size}\n else: # pragma: no cover\n return {'max_pool_size': self.max_pool_size,\n 'auto_start_request': False}\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo import MongoClient\n\n host = self.mongo_host\n if not host:\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n host = self.host\n if isinstance(host, string_t) \\\n and not host.startswith('mongodb://'):\n host = 'mongodb://{0}:{1}'.format(host, self.port)\n # don't change self.options\n conf = dict(self.options)\n conf['host'] = host\n\n self._connection = MongoClient(**conf)\n\n return self._connection\n\n def encode(self, data):\n if self.serializer == 'bson':\n # mongodb handles serialization\n return data\n return super(MongoBackend, self).encode(data)\n\n def decode(self, data):\n if self.serializer == 'bson':\n return data\n return super(MongoBackend, self).decode(data)\n\n def _store_result(self, task_id, result, state,\n traceback=None, request=None, **kwargs):\n \"\"\"Store return value and state of an executed task.\"\"\"\n meta = {\n '_id': task_id,\n 'status': state,\n 'result': self.encode(result),\n 'date_done': datetime.utcnow(),\n 'traceback': self.encode(traceback),\n 'children': self.encode(\n self.current_task_children(request),\n ),\n }\n\n try:\n self.collection.save(meta)\n except InvalidDocument as exc:\n raise EncodeError(exc)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task meta-data for a task by id.\"\"\"\n obj = self.collection.find_one({'_id': task_id})\n if obj:\n return self.meta_from_decoded({\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n })\n return {'status': states.PENDING, 'result': None}\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n self.group_collection.save({\n '_id': group_id,\n 'result': self.encode([i.id for i in result]),\n 'date_done': datetime.utcnow(),\n })\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.group_collection.find_one({'_id': group_id})\n if obj:\n return {\n 'task_id': obj['_id'],\n 'date_done': obj['date_done'],\n 'result': [\n self.app.AsyncResult(task)\n for task in self.decode(obj['result'])\n ],\n }\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.group_collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"Remove result from MongoDB.\n\n Raises:\n pymongo.exceptions.OperationsError:\n if the task_id could not be removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id})\n\n def cleanup(self):\n \"\"\"Delete expired meta-data.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n self.group_collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n return super(MongoBackend, self).__reduce__(\n args, dict(kwargs, expires=self.expires, url=self.url))\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.database_name]\n if self.user and self.password:\n if not db.authenticate(self.user, self.password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection.\n\n performs authentication if necessary.\n \"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def group_collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.groupmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def expires_delta(self):\n return timedelta(seconds=self.expires)\n\n def as_uri(self, include_password=False):\n \"\"\"Return the backend as an URI.\n\n Arguments:\n include_password (bool): Password censored if disabled.\n \"\"\"\n if not self.url:\n return 'mongodb://'\n if include_password:\n return self.url\n\n if ',' not in self.url:\n return maybe_sanitize_url(self.url)\n\n uri1, remainder = self.url.split(',', 1)\n return ','.join([maybe_sanitize_url(uri1), remainder])\n", "path": "celery/backends/mongodb.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"MongoDB result store backend.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom datetime import datetime, timedelta\nfrom kombu.utils.objects import cached_property\nfrom kombu.utils.url import maybe_sanitize_url\nfrom kombu.exceptions import EncodeError\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import string_t, items\nfrom .base import BaseBackend\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\n from pymongo.errors import InvalidDocument # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\n class InvalidDocument(Exception): # noqa\n pass\n\n__all__ = ['MongoBackend']\n\nBINARY_CODECS = frozenset(['pickle', 'msgpack'])\n\n\nclass MongoBackend(BaseBackend):\n \"\"\"MongoDB result backend.\n\n Raises:\n celery.exceptions.ImproperlyConfigured:\n if module :pypi:`pymongo` is not available.\n \"\"\"\n\n mongo_host = None\n host = 'localhost'\n port = 27017\n user = None\n password = None\n database_name = 'celery'\n taskmeta_collection = 'celery_taskmeta'\n 
groupmeta_collection = 'celery_groupmeta'\n max_pool_size = 10\n options = None\n\n supports_autoexpire = False\n\n _connection = None\n\n def __init__(self, app=None, **kwargs):\n self.options = {}\n\n super(MongoBackend, self).__init__(app, **kwargs)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n # Set option defaults\n for key, value in items(self._prepare_client_options()):\n self.options.setdefault(key, value)\n\n # update conf with mongo uri data, only if uri was given\n if self.url:\n if self.url == 'mongodb://':\n self.url += 'localhost'\n\n uri_data = pymongo.uri_parser.parse_uri(self.url)\n # build the hosts list to create a mongo connection\n hostslist = [\n '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']\n ]\n self.user = uri_data['username']\n self.password = uri_data['password']\n self.mongo_host = hostslist\n if uri_data['database']:\n # if no database is provided in the uri, use default\n self.database_name = uri_data['database']\n\n self.options.update(uri_data['options'])\n\n # update conf with specific settings\n config = self.app.conf.get('mongodb_backend_settings')\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n config = dict(config) # don't modify original\n\n if 'host' in config or 'port' in config:\n # these should take over uri conf\n self.mongo_host = None\n\n self.host = config.pop('host', self.host)\n self.port = config.pop('port', self.port)\n self.mongo_host = config.pop('mongo_host', self.mongo_host)\n self.user = config.pop('user', self.user)\n self.password = config.pop('password', self.password)\n self.database_name = config.pop('database', self.database_name)\n self.taskmeta_collection = config.pop(\n 'taskmeta_collection', self.taskmeta_collection,\n )\n self.groupmeta_collection = config.pop(\n 'groupmeta_collection', self.groupmeta_collection,\n )\n\n self.options.update(config.pop('options', {}))\n self.options.update(config)\n\n def _prepare_client_options(self):\n if pymongo.version_tuple >= (3,):\n return {'maxPoolSize': self.max_pool_size}\n else: # pragma: no cover\n return {'max_pool_size': self.max_pool_size,\n 'auto_start_request': False}\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo import MongoClient\n\n host = self.mongo_host\n if not host:\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n host = self.host\n if isinstance(host, string_t) \\\n and not host.startswith('mongodb://'):\n host = 'mongodb://{0}:{1}'.format(host, self.port)\n # don't change self.options\n conf = dict(self.options)\n conf['host'] = host\n\n self._connection = MongoClient(**conf)\n\n return self._connection\n\n def encode(self, data):\n if self.serializer == 'bson':\n # mongodb handles serialization\n return data\n payload = super(MongoBackend, self).encode(data)\n\n # serializer which are in a unsupported format (pickle/binary)\n if self.serializer in BINARY_CODECS:\n payload = Binary(payload)\n return payload\n\n def decode(self, data):\n if self.serializer == 'bson':\n return data\n return super(MongoBackend, self).decode(data)\n\n def _store_result(self, task_id, result, state,\n traceback=None, request=None, **kwargs):\n \"\"\"Store return value and state of an executed task.\"\"\"\n meta = {\n '_id': task_id,\n 'status': state,\n 'result': self.encode(result),\n 'date_done': datetime.utcnow(),\n 'traceback': self.encode(traceback),\n 'children': self.encode(\n self.current_task_children(request),\n ),\n }\n\n try:\n self.collection.save(meta)\n except InvalidDocument as exc:\n raise EncodeError(exc)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task meta-data for a task by id.\"\"\"\n obj = self.collection.find_one({'_id': task_id})\n if obj:\n return self.meta_from_decoded({\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n })\n return {'status': states.PENDING, 'result': None}\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n self.group_collection.save({\n '_id': group_id,\n 'result': self.encode([i.id for i in result]),\n 'date_done': datetime.utcnow(),\n })\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.group_collection.find_one({'_id': group_id})\n if obj:\n return {\n 'task_id': obj['_id'],\n 'date_done': obj['date_done'],\n 'result': [\n self.app.AsyncResult(task)\n for task in self.decode(obj['result'])\n ],\n }\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.group_collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"Remove result from MongoDB.\n\n Raises:\n pymongo.exceptions.OperationsError:\n if the task_id could not be removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id})\n\n def cleanup(self):\n \"\"\"Delete expired meta-data.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n self.group_collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n return super(MongoBackend, self).__reduce__(\n args, dict(kwargs, expires=self.expires, url=self.url))\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.database_name]\n if self.user and self.password:\n if not db.authenticate(self.user, self.password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection.\n\n performs authentication if necessary.\n \"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def group_collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.groupmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def expires_delta(self):\n return timedelta(seconds=self.expires)\n\n def as_uri(self, include_password=False):\n \"\"\"Return the backend as an URI.\n\n Arguments:\n include_password (bool): Password censored if disabled.\n \"\"\"\n if not self.url:\n return 'mongodb://'\n if include_password:\n return self.url\n\n if ',' not in self.url:\n return maybe_sanitize_url(self.url)\n\n uri1, remainder = self.url.split(',', 1)\n return ','.join([maybe_sanitize_url(uri1), remainder])\n", "path": "celery/backends/mongodb.py"}]} | 3,880 | 210 |
gh_patches_debug_2608 | rasdani/github-patches | git_diff | ultralytics__yolov5-296 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects
Hi,
I meet a problem:
Traceback (most recent call last):
File "train.py", line 394, in <module>
train(hyp)
File "train.py", line 331, in train
torch.save(ckpt, last)
File "/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 328, in save
_legacy_save(obj, opened_file, pickle_module, pickle_protocol)
File "/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 401, in _legacy_save
pickler.dump(obj)
**TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects**
Thanks!
environment:
ubuntu 16.04
GPU 2080Ti *4
pytorch 1.4.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/torch_utils.py`
Content:
```
1 import math
2 import os
3 import time
4 from copy import deepcopy
5
6 import torch
7 import torch.backends.cudnn as cudnn
8 import torch.nn as nn
9 import torch.nn.functional as F
10 import torchvision.models as models
11
12
13 def init_seeds(seed=0):
14 torch.manual_seed(seed)
15
16 # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
17 if seed == 0: # slower, more reproducible
18 cudnn.deterministic = True
19 cudnn.benchmark = False
20 else: # faster, less reproducible
21 cudnn.deterministic = False
22 cudnn.benchmark = True
23
24
25 def select_device(device='', apex=False, batch_size=None):
26 # device = 'cpu' or '0' or '0,1,2,3'
27 cpu_request = device.lower() == 'cpu'
28 if device and not cpu_request: # if device requested other than 'cpu'
29 os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
30 assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity
31
32 cuda = False if cpu_request else torch.cuda.is_available()
33 if cuda:
34 c = 1024 ** 2 # bytes to MB
35 ng = torch.cuda.device_count()
36 if ng > 1 and batch_size: # check that batch_size is compatible with device_count
37 assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
38 x = [torch.cuda.get_device_properties(i) for i in range(ng)]
39 s = 'Using CUDA ' + ('Apex ' if apex else '') # apex for mixed precision https://github.com/NVIDIA/apex
40 for i in range(0, ng):
41 if i == 1:
42 s = ' ' * len(s)
43 print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
44 (s, i, x[i].name, x[i].total_memory / c))
45 else:
46 print('Using CPU')
47
48 print('') # skip a line
49 return torch.device('cuda:0' if cuda else 'cpu')
50
51
52 def time_synchronized():
53 torch.cuda.synchronize() if torch.cuda.is_available() else None
54 return time.time()
55
56
57 def is_parallel(model):
58 # is model is parallel with DP or DDP
59 return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
60
61
62 def initialize_weights(model):
63 for m in model.modules():
64 t = type(m)
65 if t is nn.Conv2d:
66 pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
67 elif t is nn.BatchNorm2d:
68 m.eps = 1e-4
69 m.momentum = 0.03
70 elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
71 m.inplace = True
72
73
74 def find_modules(model, mclass=nn.Conv2d):
75 # finds layer indices matching module class 'mclass'
76 return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
77
78
79 def fuse_conv_and_bn(conv, bn):
80 # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
81 with torch.no_grad():
82 # init
83 fusedconv = torch.nn.Conv2d(conv.in_channels,
84 conv.out_channels,
85 kernel_size=conv.kernel_size,
86 stride=conv.stride,
87 padding=conv.padding,
88 bias=True)
89
90 # prepare filters
91 w_conv = conv.weight.clone().view(conv.out_channels, -1)
92 w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
93 fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
94
95 # prepare spatial bias
96 if conv.bias is not None:
97 b_conv = conv.bias
98 else:
99 b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)
100 b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
101 fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
102
103 return fusedconv
104
105
106 def model_info(model, verbose=False):
107 # Plots a line-by-line description of a PyTorch model
108 n_p = sum(x.numel() for x in model.parameters()) # number parameters
109 n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
110 if verbose:
111 print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
112 for i, (name, p) in enumerate(model.named_parameters()):
113 name = name.replace('module_list.', '')
114 print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
115 (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
116
117 try: # FLOPS
118 from thop import profile
119 flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
120 fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS
121 except:
122 fs = ''
123
124 print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
125
126
127 def load_classifier(name='resnet101', n=2):
128 # Loads a pretrained model reshaped to n-class output
129 model = models.__dict__[name](pretrained=True)
130
131 # Display model properties
132 input_size = [3, 224, 224]
133 input_space = 'RGB'
134 input_range = [0, 1]
135 mean = [0.485, 0.456, 0.406]
136 std = [0.229, 0.224, 0.225]
137 for x in [input_size, input_space, input_range, mean, std]:
138 print(x + ' =', eval(x))
139
140 # Reshape output to n classes
141 filters = model.fc.weight.shape[1]
142 model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)
143 model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)
144 model.fc.out_features = n
145 return model
146
147
148 def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio
149 # scales img(bs,3,y,x) by ratio
150 h, w = img.shape[2:]
151 s = (int(h * ratio), int(w * ratio)) # new size
152 img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
153 if not same_shape: # pad/crop img
154 gs = 32 # (pixels) grid size
155 h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
156 return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
157
158
159 class ModelEMA:
160 """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
161 Keep a moving average of everything in the model state_dict (parameters and buffers).
162 This is intended to allow functionality like
163 https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
164 A smoothed version of the weights is necessary for some training schemes to perform well.
165 E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
166 RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
167 smoothing of weights to match results. Pay attention to the decay constant you are using
168 relative to your update count per epoch.
169 To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
170 disable validation of the EMA weights. Validation will have to be done manually in a separate
171 process, or after the training stops converging.
172 This class is sensitive where it is initialized in the sequence of model init,
173 GPU assignment and distributed training wrappers.
174 I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
175 """
176
177 def __init__(self, model, decay=0.9999, device=''):
178 # Create EMA
179 self.ema = deepcopy(model.module if is_parallel(model) else model) # FP32 EMA
180 self.ema.eval()
181 self.updates = 0 # number of EMA updates
182 self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
183 self.device = device # perform ema on different device from model if set
184 if device:
185 self.ema.to(device)
186 for p in self.ema.parameters():
187 p.requires_grad_(False)
188
189 def update(self, model):
190 # Update EMA parameters
191 with torch.no_grad():
192 self.updates += 1
193 d = self.decay(self.updates)
194
195 msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
196 for k, v in self.ema.state_dict().items():
197 if v.dtype.is_floating_point:
198 v *= d
199 v += (1. - d) * msd[k].detach()
200
201 def update_attr(self, model):
202 # Update EMA attributes
203 for k, v in model.__dict__.items():
204 if not k.startswith('_') and k != 'module':
205 setattr(self.ema, k, v)
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/torch_utils.py b/utils/torch_utils.py
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -201,5 +201,5 @@
def update_attr(self, model):
# Update EMA attributes
for k, v in model.__dict__.items():
- if not k.startswith('_') and k != 'module':
+ if not k.startswith('_') and k not in ["process_group", "reducer"]:
setattr(self.ema, k, v)
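The fix skips `process_group` and `reducer` when mirroring attributes onto the EMA copy: with DistributedDataParallel those attributes hold a `ProcessGroupNCCL` handle and a C++ reducer, which pickle (and therefore `torch.save`) cannot serialize, exactly the traceback in the issue. Below is a minimal, framework-free sketch of the same attribute-copy rule; the helper and class names are made up for illustration only.
```python
def copy_public_attrs(dst, src, exclude=("process_group", "reducer")):
    # Copy plain public attributes, skipping private ones and the DDP
    # handles that pickle/torch.save cannot serialize.
    for k, v in src.__dict__.items():
        if not k.startswith("_") and k not in exclude:
            setattr(dst, k, v)


class _Box:
    pass


src, dst = _Box(), _Box()
src.names, src.process_group, src._private = ["a", "b"], object(), 42
copy_public_attrs(dst, src)
print(vars(dst))  # {'names': ['a', 'b']} -- no process_group, no _private attribute
```
Keeping those handles off the EMA object means the checkpoint dict built around it stays picklable.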
| {"golden_diff": "diff --git a/utils/torch_utils.py b/utils/torch_utils.py\n--- a/utils/torch_utils.py\n+++ b/utils/torch_utils.py\n@@ -201,5 +201,5 @@\n def update_attr(self, model):\n # Update EMA attributes\n for k, v in model.__dict__.items():\n- if not k.startswith('_') and k != 'module':\n+ if not k.startswith('_') and k not in [\"process_group\", \"reducer\"]:\n setattr(self.ema, k, v)\n", "issue": "TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects\nHi\uff0c\r\nI meet a problem:\r\n\r\nTraceback (most recent call last):\r\n File \"train.py\", line 394, in <module>\r\n train(hyp)\r\n File \"train.py\", line 331, in train\r\n torch.save(ckpt, last)\r\n File \"/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py\", line 328, in save\r\n _legacy_save(obj, opened_file, pickle_module, pickle_protocol)\r\n File \"/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py\", line 401, in _legacy_save\r\n pickler.dump(obj)\r\n**TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects**\r\n\r\nThanks!\r\n\r\nenvironment:\r\nubuntu 16.04\r\nGPU 2080Ti *4\r\npytorch 1.4.0\r\n\n", "before_files": [{"content": "import math\nimport os\nimport time\nfrom copy import deepcopy\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\n\ndef init_seeds(seed=0):\n torch.manual_seed(seed)\n\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n if seed == 0: # slower, more reproducible\n cudnn.deterministic = True\n cudnn.benchmark = False\n else: # faster, less reproducible\n cudnn.deterministic = False\n cudnn.benchmark = True\n\n\ndef select_device(device='', apex=False, batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n cpu_request = device.lower() == 'cpu'\n if device and not cpu_request: # if device requested other than 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity\n\n cuda = False if cpu_request else torch.cuda.is_available()\n if cuda:\n c = 1024 ** 2 # bytes to MB\n ng = torch.cuda.device_count()\n if ng > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)\n x = [torch.cuda.get_device_properties(i) for i in range(ng)]\n s = 'Using CUDA ' + ('Apex ' if apex else '') # apex for mixed precision https://github.com/NVIDIA/apex\n for i in range(0, ng):\n if i == 1:\n s = ' ' * len(s)\n print(\"%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)\" %\n (s, i, x[i].name, x[i].total_memory / c))\n else:\n print('Using CPU')\n\n print('') # skip a line\n return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()\n\n\ndef is_parallel(model):\n # is model is parallel with DP or DDP\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-4\n m.momentum = 0.03\n elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # finds layer indices 
matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef fuse_conv_and_bn(conv, bn):\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n with torch.no_grad():\n # init\n fusedconv = torch.nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n bias=True)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n # prepare spatial bias\n if conv.bias is not None:\n b_conv = conv.bias\n else:\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2\n fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS\n except:\n fs = ''\n\n print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = models.__dict__[name](pretrained=True)\n\n # Display model properties\n input_size = [3, 224, 224]\n input_space = 'RGB'\n input_range = [0, 1]\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n for x in [input_size, input_space, input_range, mean, std]:\n print(x + ' =', eval(x))\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio\n # scales img(bs,3,y,x) by ratio\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n gs = 32 # (pixels) grid size\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n E.g. 
Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use\n RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA\n smoothing of weights to match results. Pay attention to the decay constant you are using\n relative to your update count per epoch.\n To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but\n disable validation of the EMA weights. Validation will have to be done manually in a separate\n process, or after the training stops converging.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, device=''):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model) # FP32 EMA\n self.ema.eval()\n self.updates = 0 # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n self.device = device # perform ema on different device from model if set\n if device:\n self.ema.to(device)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model):\n # Update EMA attributes\n for k, v in model.__dict__.items():\n if not k.startswith('_') and k != 'module':\n setattr(self.ema, k, v)\n", "path": "utils/torch_utils.py"}], "after_files": [{"content": "import math\nimport os\nimport time\nfrom copy import deepcopy\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\n\ndef init_seeds(seed=0):\n torch.manual_seed(seed)\n\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n if seed == 0: # slower, more reproducible\n cudnn.deterministic = True\n cudnn.benchmark = False\n else: # faster, less reproducible\n cudnn.deterministic = False\n cudnn.benchmark = True\n\n\ndef select_device(device='', apex=False, batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n cpu_request = device.lower() == 'cpu'\n if device and not cpu_request: # if device requested other than 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity\n\n cuda = False if cpu_request else torch.cuda.is_available()\n if cuda:\n c = 1024 ** 2 # bytes to MB\n ng = torch.cuda.device_count()\n if ng > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)\n x = [torch.cuda.get_device_properties(i) for i in range(ng)]\n s = 'Using CUDA ' + ('Apex ' if apex else '') # apex for mixed precision https://github.com/NVIDIA/apex\n for i in range(0, ng):\n if i == 1:\n s = ' ' * len(s)\n print(\"%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)\" %\n (s, i, x[i].name, x[i].total_memory / c))\n else:\n print('Using CPU')\n\n print('') # skip a line\n return 
torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()\n\n\ndef is_parallel(model):\n # is model is parallel with DP or DDP\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-4\n m.momentum = 0.03\n elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # finds layer indices matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef fuse_conv_and_bn(conv, bn):\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n with torch.no_grad():\n # init\n fusedconv = torch.nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n bias=True)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n # prepare spatial bias\n if conv.bias is not None:\n b_conv = conv.bias\n else:\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2\n fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS\n except:\n fs = ''\n\n print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = models.__dict__[name](pretrained=True)\n\n # Display model properties\n input_size = [3, 224, 224]\n input_space = 'RGB'\n input_range = [0, 1]\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n for x in [input_size, input_space, input_range, mean, std]:\n print(x + ' =', eval(x))\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio\n # scales img(bs,3,y,x) by ratio\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', 
align_corners=False) # resize\n if not same_shape: # pad/crop img\n gs = 32 # (pixels) grid size\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use\n RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA\n smoothing of weights to match results. Pay attention to the decay constant you are using\n relative to your update count per epoch.\n To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but\n disable validation of the EMA weights. Validation will have to be done manually in a separate\n process, or after the training stops converging.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, device=''):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model) # FP32 EMA\n self.ema.eval()\n self.updates = 0 # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n self.device = device # perform ema on different device from model if set\n if device:\n self.ema.to(device)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model):\n # Update EMA attributes\n for k, v in model.__dict__.items():\n if not k.startswith('_') and k not in [\"process_group\", \"reducer\"]:\n setattr(self.ema, k, v)\n", "path": "utils/torch_utils.py"}]} | 3,308 | 117 |
gh_patches_debug_40082 | rasdani/github-patches | git_diff | huggingface__diffusers-6192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[IP-Adapter] add a 'unload_ip_adapter' method
fix https://github.com/huggingface/diffusers/issues/6088
Is anyone interested in adding a `unload_ip_adapter` method? it needs to undo everything we did with `load_ip_adapter` method:
https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/ip_adapter.py#L46
It needs to do below 3 things:
1. remove `image_encoder`
2. undo the changes we made to the UNet structure here (https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/unet.py#L82), i.e.
```python
self.encoder_hid_proj = None
self.config.encoder_hid_dim_type = None
```
3. reset the attention processor
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/diffusers/loaders/ip_adapter.py`
Content:
```
1 # Copyright 2023 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Dict, Union
16
17 import torch
18 from huggingface_hub.utils import validate_hf_hub_args
19 from safetensors import safe_open
20
21 from ..utils import (
22 _get_model_file,
23 is_transformers_available,
24 logging,
25 )
26
27
28 if is_transformers_available():
29 from transformers import (
30 CLIPImageProcessor,
31 CLIPVisionModelWithProjection,
32 )
33
34 from ..models.attention_processor import (
35 IPAdapterAttnProcessor,
36 IPAdapterAttnProcessor2_0,
37 )
38
39 logger = logging.get_logger(__name__)
40
41
42 class IPAdapterMixin:
43 """Mixin for handling IP Adapters."""
44
45 @validate_hf_hub_args
46 def load_ip_adapter(
47 self,
48 pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
49 subfolder: str,
50 weight_name: str,
51 **kwargs,
52 ):
53 """
54 Parameters:
55 pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
56 Can be either:
57
58 - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
59 the Hub.
60 - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
61 with [`ModelMixin.save_pretrained`].
62 - A [torch state
63 dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
64
65 cache_dir (`Union[str, os.PathLike]`, *optional*):
66 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
67 is not used.
68 force_download (`bool`, *optional*, defaults to `False`):
69 Whether or not to force the (re-)download of the model weights and configuration files, overriding the
70 cached versions if they exist.
71 resume_download (`bool`, *optional*, defaults to `False`):
72 Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
73 incompletely downloaded files are deleted.
74 proxies (`Dict[str, str]`, *optional*):
75 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
76 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
77 local_files_only (`bool`, *optional*, defaults to `False`):
78 Whether to only load local model weights and configuration files or not. If set to `True`, the model
79 won't be downloaded from the Hub.
80 token (`str` or *bool*, *optional*):
81 The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
82 `diffusers-cli login` (stored in `~/.huggingface`) is used.
83 revision (`str`, *optional*, defaults to `"main"`):
84 The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
85 allowed by Git.
86 subfolder (`str`, *optional*, defaults to `""`):
87 The subfolder location of a model file within a larger model repository on the Hub or locally.
88 """
89
90 # Load the main state dict first.
91 cache_dir = kwargs.pop("cache_dir", None)
92 force_download = kwargs.pop("force_download", False)
93 resume_download = kwargs.pop("resume_download", False)
94 proxies = kwargs.pop("proxies", None)
95 local_files_only = kwargs.pop("local_files_only", None)
96 token = kwargs.pop("token", None)
97 revision = kwargs.pop("revision", None)
98
99 user_agent = {
100 "file_type": "attn_procs_weights",
101 "framework": "pytorch",
102 }
103
104 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
105 model_file = _get_model_file(
106 pretrained_model_name_or_path_or_dict,
107 weights_name=weight_name,
108 cache_dir=cache_dir,
109 force_download=force_download,
110 resume_download=resume_download,
111 proxies=proxies,
112 local_files_only=local_files_only,
113 token=token,
114 revision=revision,
115 subfolder=subfolder,
116 user_agent=user_agent,
117 )
118 if weight_name.endswith(".safetensors"):
119 state_dict = {"image_proj": {}, "ip_adapter": {}}
120 with safe_open(model_file, framework="pt", device="cpu") as f:
121 for key in f.keys():
122 if key.startswith("image_proj."):
123 state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
124 elif key.startswith("ip_adapter."):
125 state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
126 else:
127 state_dict = torch.load(model_file, map_location="cpu")
128 else:
129 state_dict = pretrained_model_name_or_path_or_dict
130
131 keys = list(state_dict.keys())
132 if keys != ["image_proj", "ip_adapter"]:
133 raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
134
135 # load CLIP image encoer here if it has not been registered to the pipeline yet
136 if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
137 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
138 logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
139 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
140 pretrained_model_name_or_path_or_dict,
141 subfolder=os.path.join(subfolder, "image_encoder"),
142 ).to(self.device, dtype=self.dtype)
143 self.image_encoder = image_encoder
144 else:
145 raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
146
147 # create feature extractor if it has not been registered to the pipeline yet
148 if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
149 self.feature_extractor = CLIPImageProcessor()
150
151 # load ip-adapter into unet
152 self.unet._load_ip_adapter_weights(state_dict)
153
154 def set_ip_adapter_scale(self, scale):
155 for attn_processor in self.unet.attn_processors.values():
156 if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
157 attn_processor.scale = scale
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py
--- a/src/diffusers/loaders/ip_adapter.py
+++ b/src/diffusers/loaders/ip_adapter.py
@@ -132,7 +132,7 @@
if keys != ["image_proj", "ip_adapter"]:
raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
- # load CLIP image encoer here if it has not been registered to the pipeline yet
+ # load CLIP image encoder here if it has not been registered to the pipeline yet
if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
@@ -141,12 +141,14 @@
subfolder=os.path.join(subfolder, "image_encoder"),
).to(self.device, dtype=self.dtype)
self.image_encoder = image_encoder
+ self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
else:
raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
# create feature extractor if it has not been registered to the pipeline yet
if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
self.feature_extractor = CLIPImageProcessor()
+ self.register_to_config(feature_extractor=["transformers", "CLIPImageProcessor"])
# load ip-adapter into unet
self.unet._load_ip_adapter_weights(state_dict)
@@ -155,3 +157,32 @@
for attn_processor in self.unet.attn_processors.values():
if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
attn_processor.scale = scale
+
+ def unload_ip_adapter(self):
+ """
+ Unloads the IP Adapter weights
+
+ Examples:
+
+ ```python
+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+ >>> pipeline.unload_ip_adapter()
+ >>> ...
+ ```
+ """
+ # remove CLIP image encoder
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+ self.image_encoder = None
+ self.register_to_config(image_encoder=[None, None])
+
+ # remove feature extractor
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+ self.feature_extractor = None
+ self.register_to_config(feature_extractor=[None, None])
+
+ # remove hidden encoder
+ self.unet.encoder_hid_proj = None
+ self.config.encoder_hid_dim_type = None
+
+ # restore original Unet attention processors layers
+ self.unet.set_default_attn_processor()
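
A hypothetical end-to-end usage sketch for the method added in the patch above. The pipeline class, repository id, and weight file name are illustrative placeholders, not taken from this record; only `load_ip_adapter`, `set_ip_adapter_scale`, and `unload_ip_adapter` come from the code shown here.

```python
# Illustrative only: the model/repo/weight names below are assumptions.
import torch
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Attach IP-Adapter weights (signature from IPAdapterMixin.load_ip_adapter above).
pipeline.load_ip_adapter(
    "h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin"
)
pipeline.set_ip_adapter_scale(0.6)

# ... image-prompted generation would happen here ...

# Undo everything load_ip_adapter did: drop image_encoder / feature_extractor,
# clear the UNet's encoder_hid_proj, and restore default attention processors.
pipeline.unload_ip_adapter()
```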
| {"golden_diff": "diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py\n--- a/src/diffusers/loaders/ip_adapter.py\n+++ b/src/diffusers/loaders/ip_adapter.py\n@@ -132,7 +132,7 @@\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n \n- # load CLIP image encoer here if it has not been registered to the pipeline yet\n+ # load CLIP image encoder here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n@@ -141,12 +141,14 @@\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n+ self.register_to_config(image_encoder=[\"transformers\", \"CLIPVisionModelWithProjection\"])\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n \n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n+ self.register_to_config(feature_extractor=[\"transformers\", \"CLIPImageProcessor\"])\n \n # load ip-adapter into unet\n self.unet._load_ip_adapter_weights(state_dict)\n@@ -155,3 +157,32 @@\n for attn_processor in self.unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n+\n+ def unload_ip_adapter(self):\n+ \"\"\"\n+ Unloads the IP Adapter weights\n+\n+ Examples:\n+\n+ ```python\n+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.\n+ >>> pipeline.unload_ip_adapter()\n+ >>> ...\n+ ```\n+ \"\"\"\n+ # remove CLIP image encoder\n+ if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is not None:\n+ self.image_encoder = None\n+ self.register_to_config(image_encoder=[None, None])\n+\n+ # remove feature extractor\n+ if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is not None:\n+ self.feature_extractor = None\n+ self.register_to_config(feature_extractor=[None, None])\n+\n+ # remove hidden encoder\n+ self.unet.encoder_hid_proj = None\n+ self.config.encoder_hid_dim_type = None\n+\n+ # restore original Unet attention processors layers\n+ self.unet.set_default_attn_processor()\n", "issue": "[IP-Adapter] add a 'unload_ip_adapter' method\nfix https://github.com/huggingface/diffusers/issues/6088\r\n\r\nIs anyone interested in adding a `unload_ip_adapter` method? it needs to undo everything we did with `load_ip_adapter` method:\r\nhttps://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/ip_adapter.py#L46\r\n\r\nIt needs to do below 3 things:\r\n1. remove `image_encoder`\r\n2. undo the changes we made to the UNet structure here (https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/unet.py#L82), i.e. \r\n ```python\r\n self.encoder_hid_proj = None\r\n self.config.encoder_hid_dim_type = None\r\n ```\r\n3. reset the attention processor \n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Dict, Union\n\nimport torch\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom safetensors import safe_open\n\nfrom ..utils import (\n _get_model_file,\n is_transformers_available,\n logging,\n)\n\n\nif is_transformers_available():\n from transformers import (\n CLIPImageProcessor,\n CLIPVisionModelWithProjection,\n )\n\n from ..models.attention_processor import (\n IPAdapterAttnProcessor,\n IPAdapterAttnProcessor2_0,\n )\n\nlogger = logging.get_logger(__name__)\n\n\nclass IPAdapterMixin:\n \"\"\"Mixin for handling IP Adapters.\"\"\"\n\n @validate_hf_hub_args\n def load_ip_adapter(\n self,\n pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n subfolder: str,\n weight_name: str,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`ModelMixin.save_pretrained`].\n - A [torch state\n dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n \"\"\"\n\n # Load the main state dict first.\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", None)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n user_agent = {\n \"file_type\": \"attn_procs_weights\",\n \"framework\": \"pytorch\",\n }\n\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n model_file = _get_model_file(\n pretrained_model_name_or_path_or_dict,\n weights_name=weight_name,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n )\n if weight_name.endswith(\".safetensors\"):\n state_dict = {\"image_proj\": {}, \"ip_adapter\": {}}\n with safe_open(model_file, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n if key.startswith(\"image_proj.\"):\n state_dict[\"image_proj\"][key.replace(\"image_proj.\", \"\")] = f.get_tensor(key)\n elif key.startswith(\"ip_adapter.\"):\n state_dict[\"ip_adapter\"][key.replace(\"ip_adapter.\", \"\")] = f.get_tensor(key)\n else:\n state_dict = torch.load(model_file, map_location=\"cpu\")\n else:\n state_dict = pretrained_model_name_or_path_or_dict\n\n keys = list(state_dict.keys())\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n\n # load CLIP image encoer here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n image_encoder = CLIPVisionModelWithProjection.from_pretrained(\n pretrained_model_name_or_path_or_dict,\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n\n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n\n # load ip-adapter into unet\n self.unet._load_ip_adapter_weights(state_dict)\n\n def set_ip_adapter_scale(self, scale):\n for attn_processor in self.unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n", "path": "src/diffusers/loaders/ip_adapter.py"}], "after_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Dict, Union\n\nimport torch\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom safetensors import safe_open\n\nfrom ..utils import (\n _get_model_file,\n is_transformers_available,\n logging,\n)\n\n\nif is_transformers_available():\n from transformers import (\n CLIPImageProcessor,\n CLIPVisionModelWithProjection,\n )\n\n from ..models.attention_processor import (\n IPAdapterAttnProcessor,\n IPAdapterAttnProcessor2_0,\n )\n\nlogger = logging.get_logger(__name__)\n\n\nclass IPAdapterMixin:\n \"\"\"Mixin for handling IP Adapters.\"\"\"\n\n @validate_hf_hub_args\n def load_ip_adapter(\n self,\n pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n subfolder: str,\n weight_name: str,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`ModelMixin.save_pretrained`].\n - A [torch state\n dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n \"\"\"\n\n # Load the main state dict first.\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", None)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n user_agent = {\n \"file_type\": \"attn_procs_weights\",\n \"framework\": \"pytorch\",\n }\n\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n model_file = _get_model_file(\n pretrained_model_name_or_path_or_dict,\n weights_name=weight_name,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n )\n if weight_name.endswith(\".safetensors\"):\n state_dict = {\"image_proj\": {}, \"ip_adapter\": {}}\n with safe_open(model_file, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n if key.startswith(\"image_proj.\"):\n state_dict[\"image_proj\"][key.replace(\"image_proj.\", \"\")] = f.get_tensor(key)\n elif key.startswith(\"ip_adapter.\"):\n state_dict[\"ip_adapter\"][key.replace(\"ip_adapter.\", \"\")] = f.get_tensor(key)\n else:\n state_dict = torch.load(model_file, map_location=\"cpu\")\n else:\n state_dict = pretrained_model_name_or_path_or_dict\n\n keys = list(state_dict.keys())\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n\n # load CLIP image encoder here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n image_encoder = CLIPVisionModelWithProjection.from_pretrained(\n pretrained_model_name_or_path_or_dict,\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n self.register_to_config(image_encoder=[\"transformers\", \"CLIPVisionModelWithProjection\"])\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n\n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n self.register_to_config(feature_extractor=[\"transformers\", \"CLIPImageProcessor\"])\n\n # load ip-adapter into unet\n self.unet._load_ip_adapter_weights(state_dict)\n\n def set_ip_adapter_scale(self, scale):\n for attn_processor in self.unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n\n def unload_ip_adapter(self):\n \"\"\"\n Unloads the IP Adapter weights\n\n Examples:\n\n ```python\n >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.\n >>> pipeline.unload_ip_adapter()\n >>> ...\n ```\n \"\"\"\n # remove CLIP image encoder\n if hasattr(self, \"image_encoder\") and 
getattr(self, \"image_encoder\", None) is not None:\n self.image_encoder = None\n self.register_to_config(image_encoder=[None, None])\n\n # remove feature extractor\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is not None:\n self.feature_extractor = None\n self.register_to_config(feature_extractor=[None, None])\n\n # remove hidden encoder\n self.unet.encoder_hid_proj = None\n self.config.encoder_hid_dim_type = None\n\n # restore original Unet attention processors layers\n self.unet.set_default_attn_processor()\n", "path": "src/diffusers/loaders/ip_adapter.py"}]} | 2,407 | 657 |
gh_patches_debug_2105 | rasdani/github-patches | git_diff | pallets__click-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatically dedent help text of options?
```
import click
@click.command()
@click.option('--foo', help="""
heyho
i am
multiline
""")
def cli(foo):
click.echo(foo)
cli()
```
This currently does not remove the leading whitespace from each paragraph in the help text for `--foo`:
```
untitaker@untibox:~/projects/click, branch master
$ python lol.py --help
Usage: lol.py [OPTIONS]
Options:
--foo TEXT
heyho
i am
multiline
--help Show this message and exit.
```
Although i am not sure if help texts for options are even supposed to get that
complex.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `click/decorators.py`
Content:
```
1 import sys
2 import inspect
3
4 from functools import update_wrapper
5
6 from ._compat import iteritems
7 from .utils import echo
8
9
10 def pass_context(f):
11 """Marks a callback as wanting to receive the current context
12 object as first argument.
13 """
14 f.__click_pass_context__ = True
15 return f
16
17
18 def pass_obj(f):
19 """Similar to :func:`pass_context`, but only pass the object on the
20 context onwards (:attr:`Context.obj`). This is useful if that object
21 represents the state of a nested system.
22 """
23 @pass_context
24 def new_func(*args, **kwargs):
25 ctx = args[0]
26 return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)
27 return update_wrapper(new_func, f)
28
29
30 def make_pass_decorator(object_type, ensure=False):
31 """Given an object type this creates a decorator that will work
32 similar to :func:`pass_obj` but instead of passing the object of the
33 current context, it will find the innermost context of type
34 :func:`object_type`.
35
36 This generates a decorator that works roughly like this::
37
38 from functools import update_wrapper
39
40 def decorator(f):
41 @pass_context
42 def new_func(ctx, *args, **kwargs):
43 obj = ctx.find_object(object_type)
44 return ctx.invoke(f, obj, *args, **kwargs)
45 return update_wrapper(new_func, f)
46 return decorator
47
48 :param object_type: the type of the object to pass.
49 :param ensure: if set to `True`, a new object will be created and
50 remembered on the context if it's not there yet.
51 """
52 def decorator(f):
53 @pass_context
54 def new_func(*args, **kwargs):
55 ctx = args[0]
56 if ensure:
57 obj = ctx.ensure_object(object_type)
58 else:
59 obj = ctx.find_object(object_type)
60 if obj is None:
61 raise RuntimeError('Managed to invoke callback without a '
62 'context object of type %r existing'
63 % object_type.__name__)
64 return ctx.invoke(f, obj, *args[1:], **kwargs)
65 return update_wrapper(new_func, f)
66 return decorator
67
68
69 def _make_command(f, name, attrs, cls):
70 if isinstance(f, Command):
71 raise TypeError('Attempted to convert a callback into a '
72 'command twice.')
73 try:
74 params = f.__click_params__
75 params.reverse()
76 del f.__click_params__
77 except AttributeError:
78 params = []
79 help = inspect.getdoc(f)
80 if isinstance(help, bytes):
81 help = help.decode('utf-8')
82 attrs.setdefault('help', help)
83 return cls(name=name or f.__name__.lower(),
84 callback=f, params=params, **attrs)
85
86
87 def command(name=None, cls=None, **attrs):
88 """Creates a new :class:`Command` and uses the decorated function as
89 callback. This will also automatically attach all decorated
90 :func:`option`\s and :func:`argument`\s as parameters to the command.
91
92 The name of the command defaults to the name of the function. If you
93 want to change that, you can pass the intended name as the first
94 argument.
95
96 All keyword arguments are forwarded to the underlying command class.
97
98 Once decorated the function turns into a :class:`Command` instance
99 that can be invoked as a command line utility or be attached to a
100 command :class:`Group`.
101
102 :param name: the name of the command. This defaults to the function
103 name.
104 :param cls: the command class to instantiate. This defaults to
105 :class:`Command`.
106 """
107 if cls is None:
108 cls = Command
109 def decorator(f):
110 return _make_command(f, name, attrs, cls)
111 return decorator
112
113
114 def group(name=None, **attrs):
115 """Creates a new :class:`Group` with a function as callback. This
116 works otherwise the same as :func:`command` just that the `cls`
117 parameter is set to :class:`Group`.
118 """
119 attrs.setdefault('cls', Group)
120 return command(name, **attrs)
121
122
123 def _param_memo(f, param):
124 if isinstance(f, Command):
125 f.params.append(param)
126 else:
127 if not hasattr(f, '__click_params__'):
128 f.__click_params__ = []
129 f.__click_params__.append(param)
130
131
132 def argument(*param_decls, **attrs):
133 """Attaches an option to the command. All positional arguments are
134 passed as parameter declarations to :class:`Argument`; all keyword
135 arguments are forwarded unchanged. This is equivalent to creating an
136 :class:`Option` instance manually and attaching it to the
137 :attr:`Command.params` list.
138 """
139 def decorator(f):
140 _param_memo(f, Argument(param_decls, **attrs))
141 return f
142 return decorator
143
144
145 def option(*param_decls, **attrs):
146 """Attaches an option to the command. All positional arguments are
147 passed as parameter declarations to :class:`Option`; all keyword
148 arguments are forwarded unchanged. This is equivalent to creating an
149 :class:`Option` instance manually and attaching it to the
150 :attr:`Command.params` list.
151 """
152 def decorator(f):
153 _param_memo(f, Option(param_decls, **attrs))
154 return f
155 return decorator
156
157
158 def confirmation_option(*param_decls, **attrs):
159 """Shortcut for confirmation prompts that can be ignored by passing
160 ``--yes`` as parameter.
161
162 This is equivalent to decorating a function with :func:`option` with
163 the following parameters::
164
165 def callback(ctx, param, value):
166 if not value:
167 ctx.abort()
168
169 @click.command()
170 @click.option('--yes', is_flag=True, callback=callback,
171 expose_value=False, prompt='Do you want to continue?')
172 def dropdb():
173 pass
174 """
175 def decorator(f):
176 def callback(ctx, param, value):
177 if not value:
178 ctx.abort()
179 attrs.setdefault('is_flag', True)
180 attrs.setdefault('callback', callback)
181 attrs.setdefault('expose_value', False)
182 attrs.setdefault('prompt', 'Do you want to continue?')
183 attrs.setdefault('help', 'Confirm the action without prompting.')
184 return option(*(param_decls or ('--yes',)), **attrs)(f)
185 return decorator
186
187
188 def password_option(*param_decls, **attrs):
189 """Shortcut for password prompts.
190
191 This is equivalent to decorating a function with :func:`option` with
192 the following parameters::
193
194 @click.command()
195 @click.option('--password', prompt=True, confirmation_prompt=True,
196 hide_input=True)
197 def changeadmin(password):
198 pass
199 """
200 def decorator(f):
201 attrs.setdefault('prompt', True)
202 attrs.setdefault('confirmation_prompt', True)
203 attrs.setdefault('hide_input', True)
204 return option(*(param_decls or ('--password',)), **attrs)(f)
205 return decorator
206
207
208 def version_option(version=None, *param_decls, **attrs):
209 """Adds a ``--version`` option which immediately ends the program
210 printing out the version number. This is implemented as an eager
211 option that prints the version and exits the program in the callback.
212
213 :param version: the version number to show. If not provided click
214 attempts an auto discovery via setuptools.
215 :param prog_name: the name of the program (defaults to autodetection)
216 :param message: custom message to show instead of the default
217 (``'%(prog)s, version %(version)s'``)
218 :param others: everything else is forwarded to :func:`option`.
219 """
220 if version is None:
221 module = sys._getframe(1).f_globals.get('__name__')
222 def decorator(f):
223 prog_name = attrs.pop('prog_name', None)
224 message = attrs.pop('message', '%(prog)s, version %(version)s')
225
226 def callback(ctx, param, value):
227 if not value or ctx.resilient_parsing:
228 return
229 prog = prog_name
230 if prog is None:
231 prog = ctx.find_root().info_name
232 ver = version
233 if ver is None:
234 try:
235 import pkg_resources
236 except ImportError:
237 pass
238 else:
239 for dist in pkg_resources.working_set:
240 scripts = dist.get_entry_map().get('console_scripts') or {}
241 for script_name, entry_point in iteritems(scripts):
242 if entry_point.module_name == module:
243 ver = dist.version
244 break
245 if ver is None:
246 raise RuntimeError('Could not determine version')
247 echo(message % {
248 'prog': prog,
249 'version': ver,
250 })
251 ctx.exit()
252
253 attrs.setdefault('is_flag', True)
254 attrs.setdefault('expose_value', False)
255 attrs.setdefault('is_eager', True)
256 attrs.setdefault('help', 'Show the version and exit.')
257 attrs['callback'] = callback
258 return option(*(param_decls or ('--version',)), **attrs)(f)
259 return decorator
260
261
262 def help_option(*param_decls, **attrs):
263 """Adds a ``--help`` option which immediately ends the program
264 printing out the help page. This is usually unnecessary to add as
265 this is added by default to all commands unless suppressed.
266
267 Like :func:`version_option`, this is implemented as eager option that
268 prints in the callback and exits.
269
270 All arguments are forwarded to :func:`option`.
271 """
272 def decorator(f):
273 def callback(ctx, param, value):
274 if value and not ctx.resilient_parsing:
275 echo(ctx.get_help())
276 ctx.exit()
277 attrs.setdefault('is_flag', True)
278 attrs.setdefault('expose_value', False)
279 attrs.setdefault('help', 'Show this message and exit.')
280 attrs.setdefault('is_eager', True)
281 attrs['callback'] = callback
282 return option(*(param_decls or ('--help',)), **attrs)(f)
283 return decorator
284
285
286 # Circular dependencies between core and decorators
287 from .core import Command, Group, Argument, Option
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/click/decorators.py b/click/decorators.py
--- a/click/decorators.py
+++ b/click/decorators.py
@@ -150,6 +150,8 @@
:attr:`Command.params` list.
"""
def decorator(f):
+ if 'help' in attrs:
+ attrs['help'] = inspect.cleandoc(attrs['help'])
_param_memo(f, Option(param_decls, **attrs))
return f
return decorator
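
The fix above hinges on `inspect.cleandoc`, which strips the common leading indentation (and surrounding blank lines) that a triple-quoted `help` string inherits from the source layout. A minimal standard-library sketch of that behaviour, applied to the help text from the issue:

```python
import inspect

help_text = """
    heyho

    i am

    multiline
"""

print(inspect.cleandoc(help_text))
# heyho
#
# i am
#
# multiline
```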
| {"golden_diff": "diff --git a/click/decorators.py b/click/decorators.py\n--- a/click/decorators.py\n+++ b/click/decorators.py\n@@ -150,6 +150,8 @@\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n+ if 'help' in attrs:\n+ attrs['help'] = inspect.cleandoc(attrs['help'])\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n", "issue": "Automatically dedent help text of options?\n```\nimport click\n\[email protected]()\[email protected]('--foo', help=\"\"\"\n heyho\n\n i am\n\n multiline\n\"\"\")\ndef cli(foo):\n click.echo(foo)\n\ncli()\n```\n\nThis currently does not remove the leading whitespace from each paragraph in the help text for `--foo`:\n\n```\nuntitaker@untibox:~/projects/click, branch master\n$ python lol.py --help\nUsage: lol.py [OPTIONS]\n\nOptions:\n --foo TEXT\n heyho\n\n i am\n\n multiline\n --help Show this message and exit.\n```\n\nAlthough i am not sure if help texts for options are even supposed to get that\ncomplex.\n\n", "before_files": [{"content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom .utils import echo\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n f.__click_pass_context__ = True\n return f\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n attrs.setdefault('help', help)\n return cls(name=name or f.__name__.lower(),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n \"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. 
This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n return _make_command(f, name, attrs, cls)\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Argument(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged. 
This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. 
If not provided click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n module = sys._getframe(1).f_globals.get('__name__')\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n })\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help())\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py"}], "after_files": [{"content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom .utils import echo\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n f.__click_pass_context__ = True\n return f\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). 
This is useful if that object\n represents the state of a nested system.\n \"\"\"\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n attrs.setdefault('help', help)\n return cls(name=name or f.__name__.lower(),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n \"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n return _make_command(f, name, attrs, cls)\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. 
All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Argument(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n if 'help' in attrs:\n attrs['help'] = inspect.cleandoc(attrs['help'])\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. 
If not provided click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n module = sys._getframe(1).f_globals.get('__name__')\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n })\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help())\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py"}]} | 3,370 | 114 |
gh_patches_debug_24201 | rasdani/github-patches | git_diff | pandas-dev__pandas-16438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Additional examples for `json_normalize`
When handling JSON data, a common use case is to start with a list of hierarchically nested records with an unknown, or possibly inconsistent, layout, and transform them into a flat tabular structure. Pandas' existing `json_normalize` function handles this use case, but the examples in the function's documentation don't make this clear. It could be useful to provide some additional explanation and examples in the documentation.
#### Code Sample
```python
data = [
...: {'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
...: {'name': {'given': 'Mose', 'family': 'Regner'}},
...: {'id': 2, 'name': 'Faye Raker'},
...: ]
json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
```
#### Problem description
Direct conversion to a data frame doesn't provide information about the nested structure. `pandas.read_json` is also designed to work with data that's already flat.
The existing [documentation for `json_normalize`](http://pandas.pydata.org/pandas-docs/version/0.20/generated/pandas.io.json.json_normalize.html) only includes an example of using it for a somewhat more complicated process. The [tutorial sections on JSON parsing](http://pandas.pydata.org/pandas-docs/version/0.20/io.html#normalization) use the same example. These items could be updated to include additional examples that would help others understand when and how to apply `json_normalize`.
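As a concrete illustration of the kind of example the docs could add, here is a minimal sketch reusing the nested `counties` data from the existing docstring (output abbreviated; exact column order may vary by version):

```python
from pandas.io.json import json_normalize

# State records with a nested list of county records and nested metadata.
data = [{'state': 'Florida', 'shortname': 'FL',
         'info': {'governor': 'Rick Scott'},
         'counties': [{'name': 'Dade', 'population': 12345},
                      {'name': 'Broward', 'population': 40000}]}]

# Flatten the county records and carry the state-level fields along as metadata.
json_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']])
#       name  population info.governor    state shortname
# 0     Dade       12345    Rick Scott  Florida        FL
# 1  Broward       40000    Rick Scott  Florida        FL
```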
#### Output of ``pd.show_versions()``
<details>
INSTALLED VERSIONS
------------------
commit: None
python: 3.6.1.final.0
python-bits: 64
OS: Darwin
OS-release: 16.6.0
machine: x86_64
processor: i386
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
LOCALE: en_US.UTF-8
pandas: 0.20.1
pytest: 3.0.7
pip: 9.0.1
setuptools: 27.2.0
Cython: 0.25.2
numpy: 1.12.1
scipy: 0.19.0
xarray: None
IPython: 6.0.0
sphinx: 1.5.6
patsy: 0.4.1
dateutil: 2.6.0
pytz: 2017.2
blosc: None
bottleneck: 1.2.1
tables: 3.3.0
numexpr: 2.6.2
feather: None
matplotlib: 2.0.2
openpyxl: 2.4.7
xlrd: 1.0.0
xlwt: 1.2.0
xlsxwriter: 0.9.6
lxml: 3.7.3
bs4: 4.6.0
html5lib: 0.999
sqlalchemy: 1.1.9
pymysql: None
psycopg2: None
jinja2: 2.9.6
s3fs: None
pandas_gbq: None
pandas_datareader: None
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/io/json/normalize.py`
Content:
```
1 # ---------------------------------------------------------------------
2 # JSON normalization routines
3
4 import copy
5 from collections import defaultdict
6 import numpy as np
7
8 from pandas._libs.lib import convert_json_to_lines
9 from pandas import compat, DataFrame
10
11
12 def _convert_to_line_delimits(s):
13 """Helper function that converts json lists to line delimited json."""
14
15 # Determine we have a JSON list to turn to lines otherwise just return the
16 # json object, only lists can
17 if not s[0] == '[' and s[-1] == ']':
18 return s
19 s = s[1:-1]
20
21 return convert_json_to_lines(s)
22
23
24 def nested_to_record(ds, prefix="", sep=".", level=0):
25 """a simplified json_normalize
26
27 converts a nested dict into a flat dict ("record"), unlike json_normalize,
28 it does not attempt to extract a subset of the data.
29
30 Parameters
31 ----------
32 ds : dict or list of dicts
33 prefix: the prefix, optional, default: ""
34 sep : string, default '.'
35 Nested records will generate names separated by sep,
36 e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
37
38 .. versionadded:: 0.20.0
39
40 level: the number of levels in the jason string, optional, default: 0
41
42 Returns
43 -------
44 d - dict or list of dicts, matching `ds`
45
46 Examples
47 --------
48
49 IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
50 nested=dict(e=dict(c=1,d=2),d=2)))
51 Out[52]:
52 {'dict1.c': 1,
53 'dict1.d': 2,
54 'flat1': 1,
55 'nested.d': 2,
56 'nested.e.c': 1,
57 'nested.e.d': 2}
58 """
59 singleton = False
60 if isinstance(ds, dict):
61 ds = [ds]
62 singleton = True
63
64 new_ds = []
65 for d in ds:
66
67 new_d = copy.deepcopy(d)
68 for k, v in d.items():
69 # each key gets renamed with prefix
70 if not isinstance(k, compat.string_types):
71 k = str(k)
72 if level == 0:
73 newkey = k
74 else:
75 newkey = prefix + sep + k
76
77 # only dicts gets recurse-flattend
78 # only at level>1 do we rename the rest of the keys
79 if not isinstance(v, dict):
80 if level != 0: # so we skip copying for top level, common case
81 v = new_d.pop(k)
82 new_d[newkey] = v
83 continue
84 else:
85 v = new_d.pop(k)
86 new_d.update(nested_to_record(v, newkey, sep, level + 1))
87 new_ds.append(new_d)
88
89 if singleton:
90 return new_ds[0]
91 return new_ds
92
93
94 def json_normalize(data, record_path=None, meta=None,
95 meta_prefix=None,
96 record_prefix=None,
97 errors='raise',
98 sep='.'):
99 """
100 "Normalize" semi-structured JSON data into a flat table
101
102 Parameters
103 ----------
104 data : dict or list of dicts
105 Unserialized JSON objects
106 record_path : string or list of strings, default None
107 Path in each object to list of records. If not passed, data will be
108 assumed to be an array of records
109 meta : list of paths (string or list of strings), default None
110 Fields to use as metadata for each record in resulting table
111 record_prefix : string, default None
112 If True, prefix records with dotted (?) path, e.g. foo.bar.field if
113 path to records is ['foo', 'bar']
114 meta_prefix : string, default None
115 errors : {'raise', 'ignore'}, default 'raise'
116
117 * 'ignore' : will ignore KeyError if keys listed in meta are not
118 always present
119 * 'raise' : will raise KeyError if keys listed in meta are not
120 always present
121
122 .. versionadded:: 0.20.0
123
124 sep : string, default '.'
125 Nested records will generate names separated by sep,
126 e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
127
128 .. versionadded:: 0.20.0
129
130
131 Returns
132 -------
133 frame : DataFrame
134
135 Examples
136 --------
137
138 >>> data = [{'state': 'Florida',
139 ... 'shortname': 'FL',
140 ... 'info': {
141 ... 'governor': 'Rick Scott'
142 ... },
143 ... 'counties': [{'name': 'Dade', 'population': 12345},
144 ... {'name': 'Broward', 'population': 40000},
145 ... {'name': 'Palm Beach', 'population': 60000}]},
146 ... {'state': 'Ohio',
147 ... 'shortname': 'OH',
148 ... 'info': {
149 ... 'governor': 'John Kasich'
150 ... },
151 ... 'counties': [{'name': 'Summit', 'population': 1234},
152 ... {'name': 'Cuyahoga', 'population': 1337}]}]
153 >>> from pandas.io.json import json_normalize
154 >>> result = json_normalize(data, 'counties', ['state', 'shortname',
155 ... ['info', 'governor']])
156 >>> result
157 name population info.governor state shortname
158 0 Dade 12345 Rick Scott Florida FL
159 1 Broward 40000 Rick Scott Florida FL
160 2 Palm Beach 60000 Rick Scott Florida FL
161 3 Summit 1234 John Kasich Ohio OH
162 4 Cuyahoga 1337 John Kasich Ohio OH
163
164 """
165 def _pull_field(js, spec):
166 result = js
167 if isinstance(spec, list):
168 for field in spec:
169 result = result[field]
170 else:
171 result = result[spec]
172
173 return result
174
175 if isinstance(data, list) and len(data) is 0:
176 return DataFrame()
177
178 # A bit of a hackjob
179 if isinstance(data, dict):
180 data = [data]
181
182 if record_path is None:
183 if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
184 # naive normalization, this is idempotent for flat records
185 # and potentially will inflate the data considerably for
186 # deeply nested structures:
187 # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
188 #
189 # TODO: handle record value which are lists, at least error
190 # reasonably
191 data = nested_to_record(data, sep=sep)
192 return DataFrame(data)
193 elif not isinstance(record_path, list):
194 record_path = [record_path]
195
196 if meta is None:
197 meta = []
198 elif not isinstance(meta, list):
199 meta = [meta]
200
201 for i, x in enumerate(meta):
202 if not isinstance(x, list):
203 meta[i] = [x]
204
205 # Disastrously inefficient for now
206 records = []
207 lengths = []
208
209 meta_vals = defaultdict(list)
210 if not isinstance(sep, compat.string_types):
211 sep = str(sep)
212 meta_keys = [sep.join(val) for val in meta]
213
214 def _recursive_extract(data, path, seen_meta, level=0):
215 if len(path) > 1:
216 for obj in data:
217 for val, key in zip(meta, meta_keys):
218 if level + 1 == len(val):
219 seen_meta[key] = _pull_field(obj, val[-1])
220
221 _recursive_extract(obj[path[0]], path[1:],
222 seen_meta, level=level + 1)
223 else:
224 for obj in data:
225 recs = _pull_field(obj, path[0])
226
227 # For repeating the metadata later
228 lengths.append(len(recs))
229
230 for val, key in zip(meta, meta_keys):
231 if level + 1 > len(val):
232 meta_val = seen_meta[key]
233 else:
234 try:
235 meta_val = _pull_field(obj, val[level:])
236 except KeyError as e:
237 if errors == 'ignore':
238 meta_val = np.nan
239 else:
240 raise \
241 KeyError("Try running with "
242 "errors='ignore' as key "
243 "%s is not always present", e)
244 meta_vals[key].append(meta_val)
245
246 records.extend(recs)
247
248 _recursive_extract(data, record_path, {}, level=0)
249
250 result = DataFrame(records)
251
252 if record_prefix is not None:
253 result.rename(columns=lambda x: record_prefix + x, inplace=True)
254
255 # Data types, a problem
256 for k, v in compat.iteritems(meta_vals):
257 if meta_prefix is not None:
258 k = meta_prefix + k
259
260 if k in result:
261 raise ValueError('Conflicting metadata name %s, '
262 'need distinguishing prefix ' % k)
263
264 result[k] = np.array(v).repeat(lengths)
265
266 return result
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -135,6 +135,16 @@
Examples
--------
+ >>> from pandas.io.json import json_normalize
+ >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
+ ... {'name': {'given': 'Mose', 'family': 'Regner'}},
+ ... {'id': 2, 'name': 'Faye Raker'}]
+ >>> json_normalize(data)
+ id name name.family name.first name.given name.last
+ 0 1.0 NaN NaN Coleen NaN Volk
+ 1 NaN NaN Regner NaN Mose NaN
+ 2 2.0 Faye Raker NaN NaN NaN NaN
+
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
@@ -150,7 +160,6 @@
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
- >>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
| {"golden_diff": "diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py\n--- a/pandas/io/json/normalize.py\n+++ b/pandas/io/json/normalize.py\n@@ -135,6 +135,16 @@\n Examples\n --------\n \n+ >>> from pandas.io.json import json_normalize\n+ >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},\n+ ... {'name': {'given': 'Mose', 'family': 'Regner'}},\n+ ... {'id': 2, 'name': 'Faye Raker'}]\n+ >>> json_normalize(data)\n+ id name name.family name.first name.given name.last\n+ 0 1.0 NaN NaN Coleen NaN Volk\n+ 1 NaN NaN Regner NaN Mose NaN\n+ 2 2.0 Faye Raker NaN NaN NaN NaN\n+\n >>> data = [{'state': 'Florida',\n ... 'shortname': 'FL',\n ... 'info': {\n@@ -150,7 +160,6 @@\n ... },\n ... 'counties': [{'name': 'Summit', 'population': 1234},\n ... {'name': 'Cuyahoga', 'population': 1337}]}]\n- >>> from pandas.io.json import json_normalize\n >>> result = json_normalize(data, 'counties', ['state', 'shortname',\n ... ['info', 'governor']])\n >>> result\n", "issue": "DOC: Additional examples for `json_normalize`\nWhen handling JSON data, a common use case is to start with a list of hierarchically nested records with an unknown, or possibly inconsistent, layout, and transform them into a flat tabular structure. Pandas' existing `json_normalize` function handles this use case, but the examples in the function's documentation don't make this clear. It could be useful to provide some additional explanation and examples in these functions.\r\n\r\n#### Code Sample\r\n\r\n```python\r\ndata = [\r\n ...: {'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},\r\n ...: {'name': {'given': 'Mose', 'family': 'Regner'}},\r\n ...: {'id': 2, 'name': 'Faye Raker'},\r\n ...: ]\r\n\r\njson_normalize(data)\r\n\r\n id name name.family name.first name.given name.last\r\n0 1.0 NaN NaN Coleen NaN Volk\r\n1 NaN NaN Regner NaN Mose NaN\r\n2 2.0 Faye Raker NaN NaN NaN NaN\r\n```\r\n\r\n#### Problem description\r\n\r\nDirect conversion to a data frame doesn't provide information about the nested structure. `pandas.read_json` is also designed to work with data that's already flat.\r\n\r\nThe existing [documentation for `json_normalize`](http://pandas.pydata.org/pandas-docs/version/0.20/generated/pandas.io.json.json_normalize.html) only includes an example of using it for a somewhat more complicated process. The [tutorial sections on JSON parsing](http://pandas.pydata.org/pandas-docs/version/0.20/io.html#normalization) use the same example. 
These items could be updated to include additional examples that would help others understand when and how to apply `json_normalize`\r\n\r\n#### Output of ``pd.show_versions()``\r\n\r\n<details>\r\nINSTALLED VERSIONS\r\n------------------\r\ncommit: None\r\npython: 3.6.1.final.0\r\npython-bits: 64\r\nOS: Darwin\r\nOS-release: 16.6.0\r\nmachine: x86_64\r\nprocessor: i386\r\nbyteorder: little\r\nLC_ALL: None\r\nLANG: en_US.UTF-8\r\nLOCALE: en_US.UTF-8\r\n\r\npandas: 0.20.1\r\npytest: 3.0.7\r\npip: 9.0.1\r\nsetuptools: 27.2.0\r\nCython: 0.25.2\r\nnumpy: 1.12.1\r\nscipy: 0.19.0\r\nxarray: None\r\nIPython: 6.0.0\r\nsphinx: 1.5.6\r\npatsy: 0.4.1\r\ndateutil: 2.6.0\r\npytz: 2017.2\r\nblosc: None\r\nbottleneck: 1.2.1\r\ntables: 3.3.0\r\nnumexpr: 2.6.2\r\nfeather: None\r\nmatplotlib: 2.0.2\r\nopenpyxl: 2.4.7\r\nxlrd: 1.0.0\r\nxlwt: 1.2.0\r\nxlsxwriter: 0.9.6\r\nlxml: 3.7.3\r\nbs4: 4.6.0\r\nhtml5lib: 0.999\r\nsqlalchemy: 1.1.9\r\npymysql: None\r\npsycopg2: None\r\njinja2: 2.9.6\r\ns3fs: None\r\npandas_gbq: None\r\npandas_datareader: None\r\n\r\n</details>\r\n\n", "before_files": [{"content": "# ---------------------------------------------------------------------\n# JSON normalization routines\n\nimport copy\nfrom collections import defaultdict\nimport numpy as np\n\nfrom pandas._libs.lib import convert_json_to_lines\nfrom pandas import compat, DataFrame\n\n\ndef _convert_to_line_delimits(s):\n \"\"\"Helper function that converts json lists to line delimited json.\"\"\"\n\n # Determine we have a JSON list to turn to lines otherwise just return the\n # json object, only lists can\n if not s[0] == '[' and s[-1] == ']':\n return s\n s = s[1:-1]\n\n return convert_json_to_lines(s)\n\n\ndef nested_to_record(ds, prefix=\"\", sep=\".\", level=0):\n \"\"\"a simplified json_normalize\n\n converts a nested dict into a flat dict (\"record\"), unlike json_normalize,\n it does not attempt to extract a subset of the data.\n\n Parameters\n ----------\n ds : dict or list of dicts\n prefix: the prefix, optional, default: \"\"\n sep : string, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n .. 
versionadded:: 0.20.0\n\n level: the number of levels in the jason string, optional, default: 0\n\n Returns\n -------\n d - dict or list of dicts, matching `ds`\n\n Examples\n --------\n\n IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),\n nested=dict(e=dict(c=1,d=2),d=2)))\n Out[52]:\n {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1,\n 'nested.d': 2,\n 'nested.e.c': 1,\n 'nested.e.d': 2}\n \"\"\"\n singleton = False\n if isinstance(ds, dict):\n ds = [ds]\n singleton = True\n\n new_ds = []\n for d in ds:\n\n new_d = copy.deepcopy(d)\n for k, v in d.items():\n # each key gets renamed with prefix\n if not isinstance(k, compat.string_types):\n k = str(k)\n if level == 0:\n newkey = k\n else:\n newkey = prefix + sep + k\n\n # only dicts gets recurse-flattend\n # only at level>1 do we rename the rest of the keys\n if not isinstance(v, dict):\n if level != 0: # so we skip copying for top level, common case\n v = new_d.pop(k)\n new_d[newkey] = v\n continue\n else:\n v = new_d.pop(k)\n new_d.update(nested_to_record(v, newkey, sep, level + 1))\n new_ds.append(new_d)\n\n if singleton:\n return new_ds[0]\n return new_ds\n\n\ndef json_normalize(data, record_path=None, meta=None,\n meta_prefix=None,\n record_prefix=None,\n errors='raise',\n sep='.'):\n \"\"\"\n \"Normalize\" semi-structured JSON data into a flat table\n\n Parameters\n ----------\n data : dict or list of dicts\n Unserialized JSON objects\n record_path : string or list of strings, default None\n Path in each object to list of records. If not passed, data will be\n assumed to be an array of records\n meta : list of paths (string or list of strings), default None\n Fields to use as metadata for each record in resulting table\n record_prefix : string, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n path to records is ['foo', 'bar']\n meta_prefix : string, default None\n errors : {'raise', 'ignore'}, default 'raise'\n\n * 'ignore' : will ignore KeyError if keys listed in meta are not\n always present\n * 'raise' : will raise KeyError if keys listed in meta are not\n always present\n\n .. versionadded:: 0.20.0\n\n sep : string, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n .. versionadded:: 0.20.0\n\n\n Returns\n -------\n frame : DataFrame\n\n Examples\n --------\n\n >>> data = [{'state': 'Florida',\n ... 'shortname': 'FL',\n ... 'info': {\n ... 'governor': 'Rick Scott'\n ... },\n ... 'counties': [{'name': 'Dade', 'population': 12345},\n ... {'name': 'Broward', 'population': 40000},\n ... {'name': 'Palm Beach', 'population': 60000}]},\n ... {'state': 'Ohio',\n ... 'shortname': 'OH',\n ... 'info': {\n ... 'governor': 'John Kasich'\n ... },\n ... 'counties': [{'name': 'Summit', 'population': 1234},\n ... {'name': 'Cuyahoga', 'population': 1337}]}]\n >>> from pandas.io.json import json_normalize\n >>> result = json_normalize(data, 'counties', ['state', 'shortname',\n ... 
['info', 'governor']])\n >>> result\n name population info.governor state shortname\n 0 Dade 12345 Rick Scott Florida FL\n 1 Broward 40000 Rick Scott Florida FL\n 2 Palm Beach 60000 Rick Scott Florida FL\n 3 Summit 1234 John Kasich Ohio OH\n 4 Cuyahoga 1337 John Kasich Ohio OH\n\n \"\"\"\n def _pull_field(js, spec):\n result = js\n if isinstance(spec, list):\n for field in spec:\n result = result[field]\n else:\n result = result[spec]\n\n return result\n\n if isinstance(data, list) and len(data) is 0:\n return DataFrame()\n\n # A bit of a hackjob\n if isinstance(data, dict):\n data = [data]\n\n if record_path is None:\n if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):\n # naive normalization, this is idempotent for flat records\n # and potentially will inflate the data considerably for\n # deeply nested structures:\n # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}\n #\n # TODO: handle record value which are lists, at least error\n # reasonably\n data = nested_to_record(data, sep=sep)\n return DataFrame(data)\n elif not isinstance(record_path, list):\n record_path = [record_path]\n\n if meta is None:\n meta = []\n elif not isinstance(meta, list):\n meta = [meta]\n\n for i, x in enumerate(meta):\n if not isinstance(x, list):\n meta[i] = [x]\n\n # Disastrously inefficient for now\n records = []\n lengths = []\n\n meta_vals = defaultdict(list)\n if not isinstance(sep, compat.string_types):\n sep = str(sep)\n meta_keys = [sep.join(val) for val in meta]\n\n def _recursive_extract(data, path, seen_meta, level=0):\n if len(path) > 1:\n for obj in data:\n for val, key in zip(meta, meta_keys):\n if level + 1 == len(val):\n seen_meta[key] = _pull_field(obj, val[-1])\n\n _recursive_extract(obj[path[0]], path[1:],\n seen_meta, level=level + 1)\n else:\n for obj in data:\n recs = _pull_field(obj, path[0])\n\n # For repeating the metadata later\n lengths.append(len(recs))\n\n for val, key in zip(meta, meta_keys):\n if level + 1 > len(val):\n meta_val = seen_meta[key]\n else:\n try:\n meta_val = _pull_field(obj, val[level:])\n except KeyError as e:\n if errors == 'ignore':\n meta_val = np.nan\n else:\n raise \\\n KeyError(\"Try running with \"\n \"errors='ignore' as key \"\n \"%s is not always present\", e)\n meta_vals[key].append(meta_val)\n\n records.extend(recs)\n\n _recursive_extract(data, record_path, {}, level=0)\n\n result = DataFrame(records)\n\n if record_prefix is not None:\n result.rename(columns=lambda x: record_prefix + x, inplace=True)\n\n # Data types, a problem\n for k, v in compat.iteritems(meta_vals):\n if meta_prefix is not None:\n k = meta_prefix + k\n\n if k in result:\n raise ValueError('Conflicting metadata name %s, '\n 'need distinguishing prefix ' % k)\n\n result[k] = np.array(v).repeat(lengths)\n\n return result\n", "path": "pandas/io/json/normalize.py"}], "after_files": [{"content": "# ---------------------------------------------------------------------\n# JSON normalization routines\n\nimport copy\nfrom collections import defaultdict\nimport numpy as np\n\nfrom pandas._libs.lib import convert_json_to_lines\nfrom pandas import compat, DataFrame\n\n\ndef _convert_to_line_delimits(s):\n \"\"\"Helper function that converts json lists to line delimited json.\"\"\"\n\n # Determine we have a JSON list to turn to lines otherwise just return the\n # json object, only lists can\n if not s[0] == '[' and s[-1] == ']':\n return s\n s = s[1:-1]\n\n return convert_json_to_lines(s)\n\n\ndef nested_to_record(ds, prefix=\"\", sep=\".\", level=0):\n \"\"\"a simplified 
json_normalize\n\n converts a nested dict into a flat dict (\"record\"), unlike json_normalize,\n it does not attempt to extract a subset of the data.\n\n Parameters\n ----------\n ds : dict or list of dicts\n prefix: the prefix, optional, default: \"\"\n sep : string, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n .. versionadded:: 0.20.0\n\n level: the number of levels in the jason string, optional, default: 0\n\n Returns\n -------\n d - dict or list of dicts, matching `ds`\n\n Examples\n --------\n\n IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),\n nested=dict(e=dict(c=1,d=2),d=2)))\n Out[52]:\n {'dict1.c': 1,\n 'dict1.d': 2,\n 'flat1': 1,\n 'nested.d': 2,\n 'nested.e.c': 1,\n 'nested.e.d': 2}\n \"\"\"\n singleton = False\n if isinstance(ds, dict):\n ds = [ds]\n singleton = True\n\n new_ds = []\n for d in ds:\n\n new_d = copy.deepcopy(d)\n for k, v in d.items():\n # each key gets renamed with prefix\n if not isinstance(k, compat.string_types):\n k = str(k)\n if level == 0:\n newkey = k\n else:\n newkey = prefix + sep + k\n\n # only dicts gets recurse-flattend\n # only at level>1 do we rename the rest of the keys\n if not isinstance(v, dict):\n if level != 0: # so we skip copying for top level, common case\n v = new_d.pop(k)\n new_d[newkey] = v\n continue\n else:\n v = new_d.pop(k)\n new_d.update(nested_to_record(v, newkey, sep, level + 1))\n new_ds.append(new_d)\n\n if singleton:\n return new_ds[0]\n return new_ds\n\n\ndef json_normalize(data, record_path=None, meta=None,\n meta_prefix=None,\n record_prefix=None,\n errors='raise',\n sep='.'):\n \"\"\"\n \"Normalize\" semi-structured JSON data into a flat table\n\n Parameters\n ----------\n data : dict or list of dicts\n Unserialized JSON objects\n record_path : string or list of strings, default None\n Path in each object to list of records. If not passed, data will be\n assumed to be an array of records\n meta : list of paths (string or list of strings), default None\n Fields to use as metadata for each record in resulting table\n record_prefix : string, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n path to records is ['foo', 'bar']\n meta_prefix : string, default None\n errors : {'raise', 'ignore'}, default 'raise'\n\n * 'ignore' : will ignore KeyError if keys listed in meta are not\n always present\n * 'raise' : will raise KeyError if keys listed in meta are not\n always present\n\n .. versionadded:: 0.20.0\n\n sep : string, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n .. versionadded:: 0.20.0\n\n\n Returns\n -------\n frame : DataFrame\n\n Examples\n --------\n\n >>> from pandas.io.json import json_normalize\n >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},\n ... {'name': {'given': 'Mose', 'family': 'Regner'}},\n ... {'id': 2, 'name': 'Faye Raker'}]\n >>> json_normalize(data)\n id name name.family name.first name.given name.last\n 0 1.0 NaN NaN Coleen NaN Volk\n 1 NaN NaN Regner NaN Mose NaN\n 2 2.0 Faye Raker NaN NaN NaN NaN\n\n >>> data = [{'state': 'Florida',\n ... 'shortname': 'FL',\n ... 'info': {\n ... 'governor': 'Rick Scott'\n ... },\n ... 'counties': [{'name': 'Dade', 'population': 12345},\n ... {'name': 'Broward', 'population': 40000},\n ... {'name': 'Palm Beach', 'population': 60000}]},\n ... {'state': 'Ohio',\n ... 'shortname': 'OH',\n ... 'info': {\n ... 'governor': 'John Kasich'\n ... 
},\n ... 'counties': [{'name': 'Summit', 'population': 1234},\n ... {'name': 'Cuyahoga', 'population': 1337}]}]\n >>> result = json_normalize(data, 'counties', ['state', 'shortname',\n ... ['info', 'governor']])\n >>> result\n name population info.governor state shortname\n 0 Dade 12345 Rick Scott Florida FL\n 1 Broward 40000 Rick Scott Florida FL\n 2 Palm Beach 60000 Rick Scott Florida FL\n 3 Summit 1234 John Kasich Ohio OH\n 4 Cuyahoga 1337 John Kasich Ohio OH\n\n \"\"\"\n def _pull_field(js, spec):\n result = js\n if isinstance(spec, list):\n for field in spec:\n result = result[field]\n else:\n result = result[spec]\n\n return result\n\n if isinstance(data, list) and len(data) is 0:\n return DataFrame()\n\n # A bit of a hackjob\n if isinstance(data, dict):\n data = [data]\n\n if record_path is None:\n if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):\n # naive normalization, this is idempotent for flat records\n # and potentially will inflate the data considerably for\n # deeply nested structures:\n # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}\n #\n # TODO: handle record value which are lists, at least error\n # reasonably\n data = nested_to_record(data, sep=sep)\n return DataFrame(data)\n elif not isinstance(record_path, list):\n record_path = [record_path]\n\n if meta is None:\n meta = []\n elif not isinstance(meta, list):\n meta = [meta]\n\n for i, x in enumerate(meta):\n if not isinstance(x, list):\n meta[i] = [x]\n\n # Disastrously inefficient for now\n records = []\n lengths = []\n\n meta_vals = defaultdict(list)\n if not isinstance(sep, compat.string_types):\n sep = str(sep)\n meta_keys = [sep.join(val) for val in meta]\n\n def _recursive_extract(data, path, seen_meta, level=0):\n if len(path) > 1:\n for obj in data:\n for val, key in zip(meta, meta_keys):\n if level + 1 == len(val):\n seen_meta[key] = _pull_field(obj, val[-1])\n\n _recursive_extract(obj[path[0]], path[1:],\n seen_meta, level=level + 1)\n else:\n for obj in data:\n recs = _pull_field(obj, path[0])\n\n # For repeating the metadata later\n lengths.append(len(recs))\n\n for val, key in zip(meta, meta_keys):\n if level + 1 > len(val):\n meta_val = seen_meta[key]\n else:\n try:\n meta_val = _pull_field(obj, val[level:])\n except KeyError as e:\n if errors == 'ignore':\n meta_val = np.nan\n else:\n raise \\\n KeyError(\"Try running with \"\n \"errors='ignore' as key \"\n \"%s is not always present\", e)\n meta_vals[key].append(meta_val)\n\n records.extend(recs)\n\n _recursive_extract(data, record_path, {}, level=0)\n\n result = DataFrame(records)\n\n if record_prefix is not None:\n result.rename(columns=lambda x: record_prefix + x, inplace=True)\n\n # Data types, a problem\n for k, v in compat.iteritems(meta_vals):\n if meta_prefix is not None:\n k = meta_prefix + k\n\n if k in result:\n raise ValueError('Conflicting metadata name %s, '\n 'need distinguishing prefix ' % k)\n\n result[k] = np.array(v).repeat(lengths)\n\n return result\n", "path": "pandas/io/json/normalize.py"}]} | 3,832 | 365 |
gh_patches_debug_25000 | rasdani/github-patches | git_diff | adfinis__timed-backend-938 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: remaining effort on initial report creation is buggy
## Scenario
Create a report and add an estimated remaining effort to it.
## Expected behavior
It updates the remaining effort of the certain task in the statistics view.
## Actual behavior
After submitting the report, the remaining effort does **not** update in the stats view. But after editing the remaining effort and saving the report again in the timesheet, it does update the values in the stats view.
The attributes are fine in both requests to the backend. I suspect that the `POST` and `PATCH` requests handle the remaining effort attribute differently. Maybe the `POST` does not properly trigger the update?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timed/tracking/signals.py`
Content:
```
1 from django.db.models import Sum
2 from django.db.models.signals import pre_save
3 from django.dispatch import receiver
4
5 from timed.tracking.models import Report
6
7
8 @receiver(pre_save, sender=Report)
9 def update_rejected_on_reports(sender, instance, **kwargs):
10 """Unreject report when the task changes."""
11 # Check if the report is being created or updated
12 if instance.pk and instance.rejected:
13 report = Report.objects.get(id=instance.id)
14 if report.task_id != instance.task_id:
15 instance.rejected = False
16
17
18 @receiver(pre_save, sender=Report)
19 def update_most_recent_remaining_effort(sender, instance, **kwargs):
20 """Update remaining effort on task, if remaining effort tracking is active.
21
22 Update most_recent_remaining_effort on task and total_remaining_effort on project
23 only if remaining effort on report has changed.
24 Any other change on report should not trigger this signal.
25 """
26 if kwargs.get("raw", False): # pragma: no cover
27 return
28
29 if not instance.pk:
30 return
31 if instance.task.project.remaining_effort_tracking is not True:
32 return
33
34 if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:
35 task = instance.task
36 task.most_recent_remaining_effort = instance.remaining_effort
37 task.save()
38
39 project = task.project
40 total_remaining_effort = (
41 project.tasks.all()
42 .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
43 .get("sum_remaining")
44 )
45 project.total_remaining_effort = total_remaining_effort
46 project.save()
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py
--- a/timed/tracking/signals.py
+++ b/timed/tracking/signals.py
@@ -26,21 +26,30 @@
if kwargs.get("raw", False): # pragma: no cover
return
- if not instance.pk:
- return
if instance.task.project.remaining_effort_tracking is not True:
return
+ # update most_recent_remaining_effort and total_remaining_effort on report creation
+ if not instance.pk:
+ update_remaining_effort(instance)
+ return
+
+ # check if remaining effort has changed on report update
if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:
- task = instance.task
- task.most_recent_remaining_effort = instance.remaining_effort
- task.save()
-
- project = task.project
- total_remaining_effort = (
- project.tasks.all()
- .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
- .get("sum_remaining")
- )
- project.total_remaining_effort = total_remaining_effort
- project.save()
+ update_remaining_effort(instance)
+
+
+def update_remaining_effort(report):
+ task = report.task
+ project = task.project
+
+ task.most_recent_remaining_effort = report.remaining_effort
+ task.save()
+
+ total_remaining_effort = (
+ task.project.tasks.all()
+ .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
+ .get("sum_remaining")
+ )
+ project.total_remaining_effort = total_remaining_effort
+ project.save()
| {"golden_diff": "diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py\n--- a/timed/tracking/signals.py\n+++ b/timed/tracking/signals.py\n@@ -26,21 +26,30 @@\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n \n- if not instance.pk:\n- return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n \n+ # update most_recent_remaining_effort and total_remaining_effort on report creation\n+ if not instance.pk:\n+ update_remaining_effort(instance)\n+ return\n+\n+ # check if remaining effort has changed on report update\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n- task = instance.task\n- task.most_recent_remaining_effort = instance.remaining_effort\n- task.save()\n-\n- project = task.project\n- total_remaining_effort = (\n- project.tasks.all()\n- .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n- .get(\"sum_remaining\")\n- )\n- project.total_remaining_effort = total_remaining_effort\n- project.save()\n+ update_remaining_effort(instance)\n+\n+\n+def update_remaining_effort(report):\n+ task = report.task\n+ project = task.project\n+\n+ task.most_recent_remaining_effort = report.remaining_effort\n+ task.save()\n+\n+ total_remaining_effort = (\n+ task.project.tasks.all()\n+ .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n+ .get(\"sum_remaining\")\n+ )\n+ project.total_remaining_effort = total_remaining_effort\n+ project.save()\n", "issue": "bug: remaining effort on initial report creation is buggy\n## Scenario\r\nCreate a report and add an estimated remaining effort to it.\r\n\r\n## Expected behavior\r\nIt updates the remaining effort of the certain task in the statistics view.\r\n\r\n## Actual behavior\r\nAfter submitting the report the remaining effort does **not** update in stats view. But after editing the rem. effort and saving the report again in the timesheet, it will update the values in the stats view.\r\n\r\nThe attributes are fine in both requests to the backend. I suspect that a the `POST` and `PATCH` requests have different handling of the remaining effort attribute. 
Maybe the `POST` does not properly trigger the update?\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Sum\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom timed.tracking.models import Report\n\n\n@receiver(pre_save, sender=Report)\ndef update_rejected_on_reports(sender, instance, **kwargs):\n \"\"\"Unreject report when the task changes.\"\"\"\n # Check if the report is being created or updated\n if instance.pk and instance.rejected:\n report = Report.objects.get(id=instance.id)\n if report.task_id != instance.task_id:\n instance.rejected = False\n\n\n@receiver(pre_save, sender=Report)\ndef update_most_recent_remaining_effort(sender, instance, **kwargs):\n \"\"\"Update remaining effort on task, if remaining effort tracking is active.\n\n Update most_recent_remaining_effort on task and total_remaining_effort on project\n only if remaining effort on report has changed.\n Any other change on report should not trigger this signal.\n \"\"\"\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n\n if not instance.pk:\n return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n task = instance.task\n task.most_recent_remaining_effort = instance.remaining_effort\n task.save()\n\n project = task.project\n total_remaining_effort = (\n project.tasks.all()\n .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n .get(\"sum_remaining\")\n )\n project.total_remaining_effort = total_remaining_effort\n project.save()\n", "path": "timed/tracking/signals.py"}], "after_files": [{"content": "from django.db.models import Sum\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom timed.tracking.models import Report\n\n\n@receiver(pre_save, sender=Report)\ndef update_rejected_on_reports(sender, instance, **kwargs):\n \"\"\"Unreject report when the task changes.\"\"\"\n # Check if the report is being created or updated\n if instance.pk and instance.rejected:\n report = Report.objects.get(id=instance.id)\n if report.task_id != instance.task_id:\n instance.rejected = False\n\n\n@receiver(pre_save, sender=Report)\ndef update_most_recent_remaining_effort(sender, instance, **kwargs):\n \"\"\"Update remaining effort on task, if remaining effort tracking is active.\n\n Update most_recent_remaining_effort on task and total_remaining_effort on project\n only if remaining effort on report has changed.\n Any other change on report should not trigger this signal.\n \"\"\"\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n\n if instance.task.project.remaining_effort_tracking is not True:\n return\n\n # update most_recent_remaining_effort and total_remaining_effort on report creation\n if not instance.pk:\n update_remaining_effort(instance)\n return\n\n # check if remaining effort has changed on report update\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n update_remaining_effort(instance)\n\n\ndef update_remaining_effort(report):\n task = report.task\n project = task.project\n\n task.most_recent_remaining_effort = report.remaining_effort\n task.save()\n\n total_remaining_effort = (\n task.project.tasks.all()\n .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n .get(\"sum_remaining\")\n )\n project.total_remaining_effort = total_remaining_effort\n project.save()\n", "path": "timed/tracking/signals.py"}]} | 892 | 376 |
gh_patches_debug_34733 | rasdani/github-patches | git_diff | pre-commit__pre-commit-231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: base manifest value for 'exclude' is always ignored
I stumbled upon this bug while working on #226: the culprit is [`Repository.hooks`](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/repository.py#L48).
A quick fix for this would be to simply remove the default value from `pre_commit/clientlib/validate_config.py`, but the root cause is that any default value defined for a field in this file will make the corresponding manifest field useless.
Basically here is what happens in `Repository.hooks`:
- all the hooks defined in the current repository are enumerated
- at this stage, a `hook` is a dict closely matching the YAML config file content, **plus** default values for fields not defined in the YAML but having a JSON schema 'default'
- when doing the dict merge, **every** (key, value) pair in `hook` overrides the corresponding manifest entry. This includes default config values like `exclude: '^$'` overriding a base manifest value like `exclude: '.bak$'`
Hence I suggest either adding a test ensuring there will never be any 'default' defined in `CONFIG_JSON_SCHEMA`, or improving the merge logic.
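A minimal sketch of the clobbering described above, assuming a plain dict-update style merge (the names and values here are illustrative, not the actual `repository.py` code):

```python
# Illustrative only: a hook entry from the repo's manifest and the matching
# config entry after schema validation injected the 'exclude' default.
manifest_hook = {'id': 'check-foo', 'files': r'\.py$', 'exclude': r'\.bak$'}
config_hook = {'id': 'check-foo', 'exclude': '^$'}  # '^$' added by CONFIG_JSON_SCHEMA

merged = dict(manifest_hook, **config_hook)
print(merged['exclude'])  # '^$' -- the manifest's r'\.bak$' is silently discarded
```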
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/clientlib/validate_config.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from pre_commit.clientlib.validate_base import get_run_function
4 from pre_commit.clientlib.validate_base import get_validator
5 from pre_commit.clientlib.validate_base import is_regex_valid
6 from pre_commit.errors import FatalError
7
8
9 _LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'
10
11
12 def is_local_hooks(repo_entry):
13 return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING
14
15
16 class InvalidConfigError(FatalError):
17 pass
18
19
20 CONFIG_JSON_SCHEMA = {
21 'type': 'array',
22 'minItems': 1,
23 'items': {
24 'type': 'object',
25 'properties': {
26 'repo': {'type': 'string'},
27 'sha': {'type': 'string'},
28 'hooks': {
29 'type': 'array',
30 'minItems': 1,
31 'items': {
32 'type': 'object',
33 'properties': {
34 'id': {'type': 'string'},
35 'files': {'type': 'string'},
36 'exclude': {'type': 'string', 'default': '^$'},
37 'language_version': {'type': 'string'},
38 'args': {
39 'type': 'array',
40 'items': {'type': 'string'},
41 },
42 },
43 'required': ['id'],
44 }
45 }
46 },
47 'required': ['repo', 'hooks'],
48 }
49 }
50
51
52 def try_regex(repo, hook, value, field_name):
53 if not is_regex_valid(value):
54 raise InvalidConfigError(
55 'Invalid {0} regex at {1}, {2}: {3}'.format(
56 field_name, repo, hook, value,
57 )
58 )
59
60
61 def validate_config_extra(config):
62 for repo in config:
63 if is_local_hooks(repo):
64 if 'sha' in repo:
65 raise InvalidConfigError(
66 '"sha" property provided for local hooks'
67 )
68 elif 'sha' not in repo:
69 raise InvalidConfigError(
70 'Missing "sha" field for repository {0}'.format(repo['repo'])
71 )
72 for hook in repo['hooks']:
73 try_regex(repo, hook['id'], hook.get('files', ''), 'files')
74 try_regex(repo, hook['id'], hook['exclude'], 'exclude')
75
76
77 load_config = get_validator(
78 CONFIG_JSON_SCHEMA,
79 InvalidConfigError,
80 additional_validation_strategy=validate_config_extra,
81 )
82
83
84 run = get_run_function('Config filenames.', load_config, InvalidConfigError)
85
86
87 if __name__ == '__main__':
88 exit(run())
89
```
Path: `pre_commit/clientlib/validate_manifest.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from pre_commit.clientlib.validate_base import get_run_function
4 from pre_commit.clientlib.validate_base import get_validator
5 from pre_commit.clientlib.validate_base import is_regex_valid
6 from pre_commit.languages.all import all_languages
7
8
9 class InvalidManifestError(ValueError):
10 pass
11
12
13 MANIFEST_JSON_SCHEMA = {
14 'type': 'array',
15 'minItems': 1,
16 'items': {
17 'type': 'object',
18 'properties': {
19 'id': {'type': 'string'},
20 'name': {'type': 'string'},
21 'description': {'type': 'string', 'default': ''},
22 'entry': {'type': 'string'},
23 'language': {'type': 'string'},
24 'language_version': {'type': 'string', 'default': 'default'},
25 'files': {'type': 'string'},
26 'expected_return_value': {'type': 'number', 'default': 0},
27 'args': {
28 'type': 'array',
29 'default': [],
30 'items': {
31 'type': 'string',
32 },
33 },
34 },
35 'required': ['id', 'name', 'entry', 'language', 'files'],
36 },
37 }
38
39
40 def validate_languages(hook_config):
41 if hook_config['language'] not in all_languages:
42 raise InvalidManifestError(
43 'Expected language {0} for {1} to be one of {2!r}'.format(
44 hook_config['id'],
45 hook_config['language'],
46 all_languages,
47 )
48 )
49
50
51 def validate_files(hook_config):
52 if not is_regex_valid(hook_config['files']):
53 raise InvalidManifestError(
54 'Invalid files regex at {0}: {1}'.format(
55 hook_config['id'],
56 hook_config['files'],
57 )
58 )
59
60
61 def additional_manifest_check(obj):
62 for hook_config in obj:
63 validate_languages(hook_config)
64 validate_files(hook_config)
65
66
67 load_manifest = get_validator(
68 MANIFEST_JSON_SCHEMA,
69 InvalidManifestError,
70 additional_manifest_check,
71 )
72
73
74 run = get_run_function(
75 'Manifest filenames.',
76 load_manifest,
77 InvalidManifestError,
78 )
79
80
81 if __name__ == '__main__':
82 exit(run())
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py
--- a/pre_commit/clientlib/validate_config.py
+++ b/pre_commit/clientlib/validate_config.py
@@ -33,7 +33,7 @@
'properties': {
'id': {'type': 'string'},
'files': {'type': 'string'},
- 'exclude': {'type': 'string', 'default': '^$'},
+ 'exclude': {'type': 'string'},
'language_version': {'type': 'string'},
'args': {
'type': 'array',
@@ -71,7 +71,7 @@
)
for hook in repo['hooks']:
try_regex(repo, hook['id'], hook.get('files', ''), 'files')
- try_regex(repo, hook['id'], hook['exclude'], 'exclude')
+ try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')
load_config = get_validator(
diff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py
--- a/pre_commit/clientlib/validate_manifest.py
+++ b/pre_commit/clientlib/validate_manifest.py
@@ -20,6 +20,7 @@
'name': {'type': 'string'},
'description': {'type': 'string', 'default': ''},
'entry': {'type': 'string'},
+ 'exclude': {'type': 'string', 'default': '^$'},
'language': {'type': 'string'},
'language_version': {'type': 'string', 'default': 'default'},
'files': {'type': 'string'},
@@ -52,8 +53,14 @@
if not is_regex_valid(hook_config['files']):
raise InvalidManifestError(
'Invalid files regex at {0}: {1}'.format(
- hook_config['id'],
- hook_config['files'],
+ hook_config['id'], hook_config['files'],
+ )
+ )
+
+ if not is_regex_valid(hook_config.get('exclude', '')):
+ raise InvalidManifestError(
+ 'Invalid exclude regex at {0}: {1}'.format(
+ hook_config['id'], hook_config['exclude'],
)
)
| {"golden_diff": "diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py\n--- a/pre_commit/clientlib/validate_config.py\n+++ b/pre_commit/clientlib/validate_config.py\n@@ -33,7 +33,7 @@\n 'properties': {\n 'id': {'type': 'string'},\n 'files': {'type': 'string'},\n- 'exclude': {'type': 'string', 'default': '^$'},\n+ 'exclude': {'type': 'string'},\n 'language_version': {'type': 'string'},\n 'args': {\n 'type': 'array',\n@@ -71,7 +71,7 @@\n )\n for hook in repo['hooks']:\n try_regex(repo, hook['id'], hook.get('files', ''), 'files')\n- try_regex(repo, hook['id'], hook['exclude'], 'exclude')\n+ try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')\n \n \n load_config = get_validator(\ndiff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py\n--- a/pre_commit/clientlib/validate_manifest.py\n+++ b/pre_commit/clientlib/validate_manifest.py\n@@ -20,6 +20,7 @@\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n+ 'exclude': {'type': 'string', 'default': '^$'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n@@ -52,8 +53,14 @@\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n- hook_config['id'],\n- hook_config['files'],\n+ hook_config['id'], hook_config['files'],\n+ )\n+ )\n+\n+ if not is_regex_valid(hook_config.get('exclude', '')):\n+ raise InvalidManifestError(\n+ 'Invalid exclude regex at {0}: {1}'.format(\n+ hook_config['id'], hook_config['exclude'],\n )\n )\n", "issue": "Bug: base manifest value for 'exclude' is always ignored\nI stumbled upon this bug while working on #226: the culprit is [`Repository.hooks`](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/repository.py#L48).\n\nA quick fix for this would be to simply remove the default value from `pre_commit/clientlib/validate_config.py`, but the root cause is that any default value defined for a field in this file will make the corresponding manifest field useless.\n\nBasically here is what happens in `Repository.hooks`:\n- all the hooks defined in the current repository are enumerated\n- at this stage, a `hook` is a dict closely matching the Yaml the config file content, **plus** default values for fields not defined in the Yaml but having a JSON schema 'default'\n- when doing the dict merge, **every** (key,value) pair in `hook` overrides the corresponding manifest entry. 
This includes default config value like `exclude: '$^'` overriding a base manifest value like `exclude: '.bak$'`\n\nHence I suggest either adding a test ensuring there will never be any 'default' defined in `CONFIG_JSON_SCHEMA`, or improving the merge logic.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.errors import FatalError\n\n\n_LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'\n\n\ndef is_local_hooks(repo_entry):\n return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\nCONFIG_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'repo': {'type': 'string'},\n 'sha': {'type': 'string'},\n 'hooks': {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'files': {'type': 'string'},\n 'exclude': {'type': 'string', 'default': '^$'},\n 'language_version': {'type': 'string'},\n 'args': {\n 'type': 'array',\n 'items': {'type': 'string'},\n },\n },\n 'required': ['id'],\n }\n }\n },\n 'required': ['repo', 'hooks'],\n }\n}\n\n\ndef try_regex(repo, hook, value, field_name):\n if not is_regex_valid(value):\n raise InvalidConfigError(\n 'Invalid {0} regex at {1}, {2}: {3}'.format(\n field_name, repo, hook, value,\n )\n )\n\n\ndef validate_config_extra(config):\n for repo in config:\n if is_local_hooks(repo):\n if 'sha' in repo:\n raise InvalidConfigError(\n '\"sha\" property provided for local hooks'\n )\n elif 'sha' not in repo:\n raise InvalidConfigError(\n 'Missing \"sha\" field for repository {0}'.format(repo['repo'])\n )\n for hook in repo['hooks']:\n try_regex(repo, hook['id'], hook.get('files', ''), 'files')\n try_regex(repo, hook['id'], hook['exclude'], 'exclude')\n\n\nload_config = get_validator(\n CONFIG_JSON_SCHEMA,\n InvalidConfigError,\n additional_validation_strategy=validate_config_extra,\n)\n\n\nrun = get_run_function('Config filenames.', load_config, InvalidConfigError)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_config.py"}, {"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.languages.all import all_languages\n\n\nclass InvalidManifestError(ValueError):\n pass\n\n\nMANIFEST_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n 'expected_return_value': {'type': 'number', 'default': 0},\n 'args': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n },\n 'required': ['id', 'name', 'entry', 'language', 'files'],\n },\n}\n\n\ndef validate_languages(hook_config):\n if hook_config['language'] not in all_languages:\n raise InvalidManifestError(\n 'Expected language {0} for {1} to be one of {2!r}'.format(\n hook_config['id'],\n hook_config['language'],\n all_languages,\n )\n )\n\n\ndef 
validate_files(hook_config):\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n hook_config['id'],\n hook_config['files'],\n )\n )\n\n\ndef additional_manifest_check(obj):\n for hook_config in obj:\n validate_languages(hook_config)\n validate_files(hook_config)\n\n\nload_manifest = get_validator(\n MANIFEST_JSON_SCHEMA,\n InvalidManifestError,\n additional_manifest_check,\n)\n\n\nrun = get_run_function(\n 'Manifest filenames.',\n load_manifest,\n InvalidManifestError,\n)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_manifest.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.errors import FatalError\n\n\n_LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'\n\n\ndef is_local_hooks(repo_entry):\n return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\nCONFIG_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'repo': {'type': 'string'},\n 'sha': {'type': 'string'},\n 'hooks': {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'files': {'type': 'string'},\n 'exclude': {'type': 'string'},\n 'language_version': {'type': 'string'},\n 'args': {\n 'type': 'array',\n 'items': {'type': 'string'},\n },\n },\n 'required': ['id'],\n }\n }\n },\n 'required': ['repo', 'hooks'],\n }\n}\n\n\ndef try_regex(repo, hook, value, field_name):\n if not is_regex_valid(value):\n raise InvalidConfigError(\n 'Invalid {0} regex at {1}, {2}: {3}'.format(\n field_name, repo, hook, value,\n )\n )\n\n\ndef validate_config_extra(config):\n for repo in config:\n if is_local_hooks(repo):\n if 'sha' in repo:\n raise InvalidConfigError(\n '\"sha\" property provided for local hooks'\n )\n elif 'sha' not in repo:\n raise InvalidConfigError(\n 'Missing \"sha\" field for repository {0}'.format(repo['repo'])\n )\n for hook in repo['hooks']:\n try_regex(repo, hook['id'], hook.get('files', ''), 'files')\n try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')\n\n\nload_config = get_validator(\n CONFIG_JSON_SCHEMA,\n InvalidConfigError,\n additional_validation_strategy=validate_config_extra,\n)\n\n\nrun = get_run_function('Config filenames.', load_config, InvalidConfigError)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_config.py"}, {"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.languages.all import all_languages\n\n\nclass InvalidManifestError(ValueError):\n pass\n\n\nMANIFEST_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n 'exclude': {'type': 'string', 'default': '^$'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n 'expected_return_value': {'type': 'number', 'default': 0},\n 'args': {\n 
'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n },\n 'required': ['id', 'name', 'entry', 'language', 'files'],\n },\n}\n\n\ndef validate_languages(hook_config):\n if hook_config['language'] not in all_languages:\n raise InvalidManifestError(\n 'Expected language {0} for {1} to be one of {2!r}'.format(\n hook_config['id'],\n hook_config['language'],\n all_languages,\n )\n )\n\n\ndef validate_files(hook_config):\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n hook_config['id'], hook_config['files'],\n )\n )\n\n if not is_regex_valid(hook_config.get('exclude', '')):\n raise InvalidManifestError(\n 'Invalid exclude regex at {0}: {1}'.format(\n hook_config['id'], hook_config['exclude'],\n )\n )\n\n\ndef additional_manifest_check(obj):\n for hook_config in obj:\n validate_languages(hook_config)\n validate_files(hook_config)\n\n\nload_manifest = get_validator(\n MANIFEST_JSON_SCHEMA,\n InvalidManifestError,\n additional_manifest_check,\n)\n\n\nrun = get_run_function(\n 'Manifest filenames.',\n load_manifest,\n InvalidManifestError,\n)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_manifest.py"}]} | 1,877 | 492 |
gh_patches_debug_7190 | rasdani/github-patches | git_diff | huggingface__accelerate-13 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't send the values of int to device
My training data looks like:
src_image, target_image, src_camera, target_camera, src_camera_idx, target_camera_idx
Where src_camera_idx and target_camera_idx are integers
When I try to apply accelerate I get the following error:
TypeError: Can't send the values of type <class 'int'> to device cuda:0, only of nested list/tuple/dicts of tensors or objects having a `to` method.
We don't need to send the integers to the device. Perhaps instead of raising an error here, you could simply skip the items that cannot be moved to the device? Or at least give me the option to skip them if I know my data contains such objects.
--- END ISSUE ---
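As an illustration of the behaviour being requested (this is a standalone sketch, not the `accelerate` implementation, and it assumes PyTorch is installed), plain Python values without a `to` method can simply be passed through instead of raising:
```python
import torch

def send_to_device_sketch(obj, device):
    # Recursively move tensors; pass through plain values such as ints.
    if isinstance(obj, (list, tuple)):
        return type(obj)(send_to_device_sketch(o, device) for o in obj)
    if isinstance(obj, dict):
        return type(obj)({k: send_to_device_sketch(v, device) for k, v in obj.items()})
    if not hasattr(obj, "to"):
        return obj                      # e.g. src_camera_idx / target_camera_idx as ints
    return obj.to(device)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch = (torch.zeros(2, 3), torch.ones(2, 3), 3, 7)   # two tensors plus two ints
moved = send_to_device_sketch(batch, device)
print([type(x).__name__ for x in moved])              # the ints survive unchanged
```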
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/utils.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import random
16 from enum import Enum
17 from typing import List, Optional, Union
18
19 import numpy as np
20 import torch
21
22 from .state import AcceleratorState, DistributedType, is_tpu_available
23
24
25 if is_tpu_available():
26 import torch_xla.core.xla_model as xm
27
28
29 class RNGType(Enum):
30 TORCH = "torch"
31 CUDA = "cuda"
32 XLA = "xla"
33 GENERATOR = "generator"
34
35
36 def set_seed(seed: int):
37 """
38 Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.
39
40 Args:
41 seed (:obj:`int`): The seed to set.
42 """
43 random.seed(seed)
44 np.random.seed(seed)
45 torch.manual_seed(seed)
46 torch.cuda.manual_seed_all(seed)
47 # ^^ safe to call this function even if cuda is not available
48 if is_tpu_available():
49 xm.set_rng_state(seed)
50
51
52 def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
53 # Get the proper rng state
54 if rng_type == RNGType.TORCH:
55 rng_state = torch.get_rng_state()
56 elif rng_type == RNGType.CUDA:
57 rng_state = torch.cuda.get_rng_state()
58 elif rng_type == RNGType.XLA:
59 assert is_tpu_available(), "Can't synchronize XLA seeds on an environment without TPUs."
60 rng_state = torch.tensor(xm.get_rng_state())
61 elif rng_type == RNGType.GENERATOR:
62 assert generator is not None, "Need a generator to synchronize its seed."
63 rng_state = generator.get_state()
64
65 # Broadcast the rng state from device 0 to other devices
66 state = AcceleratorState()
67 if state.distributed_type == DistributedType.TPU:
68 rng_state = xm.mesh_reduce("random_seed", rng_state, lambda x: x[0])
69 elif state.distributed_type == DistributedType.MULTI_GPU:
70 rng_state = rng_state.to(state.device)
71 torch.distributed.broadcast(rng_state, 0)
72 rng_state = rng_state.cpu()
73
74 # Set the broadcast rng state
75 if rng_type == RNGType.TORCH:
76 torch.set_rng_state(rng_state)
77 elif rng_type == RNGType.CUDA:
78 torch.cuda.set_rng_state(rng_state)
79 elif rng_type == RNGType.XLA:
80 xm.set_rng_state(rng_state.item())
81 elif rng_type == RNGType.GENERATOR:
82 generator.set_state(rng_state)
83
84
85 def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
86 for rng_type in rng_types:
87 synchronize_rng_state(RNGType(rng_type), generator=generator)
88
89
90 def send_to_device(tensor, device):
91 """
92 Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
93
94 Args:
95 tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
96 The data to send to a given device.
97 device (:obj:`torch.device`):
98 The device to send the data to
99
100 Returns:
101 The same data structure as :obj:`tensor` with all tensors sent to the proper device.
102 """
103 if isinstance(tensor, (list, tuple)):
104 return type(tensor)(send_to_device(t, device) for t in tensor)
105 elif isinstance(tensor, dict):
106 return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})
107 elif not hasattr(tensor, "to"):
108 raise TypeError(
109 f"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts "
110 "of tensors or objects having a `to` method."
111 )
112 return tensor.to(device)
113
114
115 def extract_model_from_parallel(model):
116 """
117 Extract a model from its distributed containers.
118
119 Args:
120 model (:obj:`torch.nn.Module`): The model to extract.
121
122 Returns:
123 :obj:`torch.nn.Module`: The extracted model.
124 """
125 while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):
126 model = model.module
127 return model
128
129
130 def _tpu_gather(tensor, name="tensor"):
131 if isinstance(tensor, (list, tuple)):
132 return type(tensor)(_tpu_gather(t, name=f"{name}_{i}") for i, t in enumerate(tensor))
133 elif isinstance(tensor, dict):
134 return type(tensor)({k: _tpu_gather(v, name=f"{name}_{k}") for k, v in tensor.items()})
135 elif not isinstance(tensor, torch.Tensor):
136 raise TypeError(f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.")
137 return xm.mesh_reduce(name, tensor, torch.cat)
138
139
140 def _gpu_gather(tensor):
141 if isinstance(tensor, (list, tuple)):
142 return type(tensor)(_gpu_gather(t) for t in tensor)
143 elif isinstance(tensor, dict):
144 return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})
145 elif not isinstance(tensor, torch.Tensor):
146 raise TypeError(f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.")
147 output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
148 torch.distributed.all_gather(output_tensors, tensor)
149 return torch.cat(output_tensors, dim=0)
150
151
152 def gather(tensor):
153 """
154 Recusrively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
155
156 Args:
157 tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
158 The data to gather.
159
160 Returns:
161 The same data structure as :obj:`tensor` with all tensors sent to the proper device.
162 """
163 if AcceleratorState().distributed_type == DistributedType.TPU:
164 return _tpu_gather(tensor, name="accelerate.utils.gather")
165 elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:
166 return _gpu_gather(tensor)
167 else:
168 return tensor
169
170
171 def wait_for_everyone():
172 """
173 Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
174
175 Warning::
176
177 Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
178 """
179 if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:
180 torch.distributed.barrier()
181 elif AcceleratorState().distributed_type == DistributedType.TPU:
182 xm.rendezvous("accelerate.utils.wait_for_everyone")
183
184
185 def save(obj, f):
186 """
187 Save the data to disk. Use in place of :obj:`torch.save()`.
188
189 Args:
190 obj: The data to save
191 f: The file (or file-like object) to use to save the data
192 """
193 if AcceleratorState().distributed_type == DistributedType.TPU:
194 xm.save(obj, f)
195 elif AcceleratorState().local_process_index == 0:
196 torch.save(obj, f)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py
--- a/src/accelerate/utils.py
+++ b/src/accelerate/utils.py
@@ -105,10 +105,7 @@
elif isinstance(tensor, dict):
return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})
elif not hasattr(tensor, "to"):
- raise TypeError(
- f"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts "
- "of tensors or objects having a `to` method."
- )
+ return tensor
return tensor.to(device)
| {"golden_diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -105,10 +105,7 @@\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n- raise TypeError(\n- f\"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts \"\n- \"of tensors or objects having a `to` method.\"\n- )\n+ return tensor\n return tensor.to(device)\n", "issue": "Can't send the values of int to device\nMy training data looks like:\r\n\r\nsrc_image, target_image, src_camera, target_camera, src_camera_idx, target_camera_idx\r\n\r\nWhere src_camera_idx, target_camera_idx are integers\r\n\r\nWhen I try to apply accelerate I get the following error:\r\nTypeError: Can't send the values of type <class 'int'> to device cuda:0, only of nested list/tuple/dicts of tensors or objects having a `to` method.\r\n\r\nWe don't need to send the integers to the device. Perhaps instead of raising an error here, you can simply skip the items that cannot be moved to device? Or at least give me the option to skip them if I know my data has such objects.\r\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nfrom enum import Enum\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nclass RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n XLA = \"xla\"\n GENERATOR = \"generator\"\n\n\ndef set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n\n Args:\n seed (:obj:`int`): The seed to set.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # ^^ safe to call this function even if cuda is not available\n if is_tpu_available():\n xm.set_rng_state(seed)\n\n\ndef synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n # Get the proper rng state\n if rng_type == RNGType.TORCH:\n rng_state = torch.get_rng_state()\n elif rng_type == RNGType.CUDA:\n rng_state = torch.cuda.get_rng_state()\n elif rng_type == RNGType.XLA:\n assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n rng_state = torch.tensor(xm.get_rng_state())\n elif rng_type == RNGType.GENERATOR:\n assert generator is not None, \"Need a generator to synchronize its seed.\"\n rng_state = generator.get_state()\n\n # Broadcast the rng state from device 0 to other devices\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n elif state.distributed_type == 
DistributedType.MULTI_GPU:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n\n # Set the broadcast rng state\n if rng_type == RNGType.TORCH:\n torch.set_rng_state(rng_state)\n elif rng_type == RNGType.CUDA:\n torch.cuda.set_rng_state(rng_state)\n elif rng_type == RNGType.XLA:\n xm.set_rng_state(rng_state.item())\n elif rng_type == RNGType.GENERATOR:\n generator.set_state(rng_state)\n\n\ndef synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n for rng_type in rng_types:\n synchronize_rng_state(RNGType(rng_type), generator=generator)\n\n\ndef send_to_device(tensor, device):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to send to a given device.\n device (:obj:`torch.device`):\n The device to send the data to\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(send_to_device(t, device) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n raise TypeError(\n f\"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts \"\n \"of tensors or objects having a `to` method.\"\n )\n return tensor.to(device)\n\n\ndef extract_model_from_parallel(model):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (:obj:`torch.nn.Module`): The model to extract.\n\n Returns:\n :obj:`torch.nn.Module`: The extracted model.\n \"\"\"\n while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):\n model = model.module\n return model\n\n\ndef _tpu_gather(tensor, name=\"tensor\"):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor))\n elif isinstance(tensor, dict):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n return xm.mesh_reduce(name, tensor, torch.cat)\n\n\ndef _gpu_gather(tensor):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_gpu_gather(t) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(output_tensors, tensor)\n return torch.cat(output_tensors, dim=0)\n\n\ndef gather(tensor):\n \"\"\"\n Recusrively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n return 
_gpu_gather(tensor)\n else:\n return tensor\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n Warning::\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == DistributedType.TPU:\n xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. Use in place of :obj:`torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif AcceleratorState().local_process_index == 0:\n torch.save(obj, f)\n", "path": "src/accelerate/utils.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nfrom enum import Enum\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nclass RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n XLA = \"xla\"\n GENERATOR = \"generator\"\n\n\ndef set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n\n Args:\n seed (:obj:`int`): The seed to set.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # ^^ safe to call this function even if cuda is not available\n if is_tpu_available():\n xm.set_rng_state(seed)\n\n\ndef synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n # Get the proper rng state\n if rng_type == RNGType.TORCH:\n rng_state = torch.get_rng_state()\n elif rng_type == RNGType.CUDA:\n rng_state = torch.cuda.get_rng_state()\n elif rng_type == RNGType.XLA:\n assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n rng_state = torch.tensor(xm.get_rng_state())\n elif rng_type == RNGType.GENERATOR:\n assert generator is not None, \"Need a generator to synchronize its seed.\"\n rng_state = generator.get_state()\n\n # Broadcast the rng state from device 0 to other devices\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n elif state.distributed_type == DistributedType.MULTI_GPU:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n\n # Set the broadcast rng state\n if rng_type == RNGType.TORCH:\n torch.set_rng_state(rng_state)\n elif rng_type == RNGType.CUDA:\n torch.cuda.set_rng_state(rng_state)\n elif rng_type 
== RNGType.XLA:\n xm.set_rng_state(rng_state.item())\n elif rng_type == RNGType.GENERATOR:\n generator.set_state(rng_state)\n\n\ndef synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n for rng_type in rng_types:\n synchronize_rng_state(RNGType(rng_type), generator=generator)\n\n\ndef send_to_device(tensor, device):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to send to a given device.\n device (:obj:`torch.device`):\n The device to send the data to\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(send_to_device(t, device) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n return tensor\n return tensor.to(device)\n\n\ndef extract_model_from_parallel(model):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (:obj:`torch.nn.Module`): The model to extract.\n\n Returns:\n :obj:`torch.nn.Module`: The extracted model.\n \"\"\"\n while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):\n model = model.module\n return model\n\n\ndef _tpu_gather(tensor, name=\"tensor\"):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor))\n elif isinstance(tensor, dict):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n return xm.mesh_reduce(name, tensor, torch.cat)\n\n\ndef _gpu_gather(tensor):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_gpu_gather(t) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(output_tensors, tensor)\n return torch.cat(output_tensors, dim=0)\n\n\ndef gather(tensor):\n \"\"\"\n Recusrively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n return _gpu_gather(tensor)\n else:\n return tensor\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n Warning::\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == 
DistributedType.TPU:\n xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. Use in place of :obj:`torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif AcceleratorState().local_process_index == 0:\n torch.save(obj, f)\n", "path": "src/accelerate/utils.py"}]} | 2,563 | 152 |
gh_patches_debug_964 | rasdani/github-patches | git_diff | mars-project__mars-1631 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `Cannot find serializable class` raised when calling set_label
When calling `DataFrame.columns = xxxx` in a distributed environment, a KeyError is raised with the message `Cannot find serializable class for type_id 1517314310`.
--- END ISSUE ---
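For context, this kind of error message usually points at a registration problem rather than a serialization bug: serializable operand classes register themselves in a registry when their defining module is imported, so a module that is never imported never registers its class. The following generic sketch (not Mars code; the class name and type ids are made up) reproduces the failure mode:
```python
_REGISTRY = {}

def register(cls):
    # Classes register themselves at import time via this decorator.
    _REGISTRY[cls.type_id] = cls
    return cls

@register
class SetIndexOp:          # its module was imported, so it is in the registry
    type_id = 1001

def find_serializable(type_id):
    try:
        return _REGISTRY[type_id]
    except KeyError:
        raise KeyError(f"Cannot find serializable class for type_id {type_id}")

print(find_serializable(1001))          # found
try:
    find_serializable(1517314310)       # its defining module was never imported
except KeyError as exc:
    print(exc)
```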
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/dataframe/indexing/__init__.py`
Content:
```
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def _install():
17 from pandas.util import cache_readonly
18 from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE
19 from .at import at
20 from .getitem import dataframe_getitem, series_getitem
21 from .iat import iat
22 from .iloc import iloc, head, tail
23 from .insert import df_insert
24 from .loc import loc
25 from .rename import df_rename, series_rename, index_rename
26 from .reset_index import df_reset_index, series_reset_index
27 from .set_index import set_index
28 from .setitem import dataframe_setitem
29 from .reindex import reindex
30 from .where import mask, where
31
32 for cls in DATAFRAME_TYPE + SERIES_TYPE:
33 setattr(cls, 'iloc', cache_readonly(iloc))
34 setattr(cls, 'loc', cache_readonly(loc))
35 setattr(cls, 'iat', cache_readonly(iat))
36 setattr(cls, 'at', cache_readonly(at))
37 setattr(cls, 'reindex', reindex)
38 setattr(cls, 'head', head)
39 setattr(cls, 'tail', tail)
40 setattr(cls, 'mask', mask)
41 setattr(cls, 'where', where)
42
43 for cls in DATAFRAME_TYPE:
44 setattr(cls, 'set_index', set_index)
45 setattr(cls, '__getitem__', dataframe_getitem)
46 setattr(cls, '__setitem__', dataframe_setitem)
47 setattr(cls, 'insert', df_insert)
48 setattr(cls, 'reset_index', df_reset_index)
49 setattr(cls, 'rename', df_rename)
50
51 for cls in SERIES_TYPE:
52 setattr(cls, '__getitem__', series_getitem)
53 setattr(cls, 'reset_index', series_reset_index)
54 setattr(cls, 'rename', series_rename)
55
56 for cls in INDEX_TYPE:
57 setattr(cls, 'rename', index_rename)
58
59
60 _install()
61 del _install
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py
--- a/mars/dataframe/indexing/__init__.py
+++ b/mars/dataframe/indexing/__init__.py
@@ -56,6 +56,10 @@
for cls in INDEX_TYPE:
setattr(cls, 'rename', index_rename)
+ # make sure operand is registered
+ from .set_label import DataFrameSetLabel
+ del DataFrameSetLabel
+
_install()
del _install
| {"golden_diff": "diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py\n--- a/mars/dataframe/indexing/__init__.py\n+++ b/mars/dataframe/indexing/__init__.py\n@@ -56,6 +56,10 @@\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n \n+ # make sure operand is registered\n+ from .set_label import DataFrameSetLabel\n+ del DataFrameSetLabel\n+\n \n _install()\n del _install\n", "issue": "[BUG] `Cannot find serializable class` raised when calling set_label\nWhen calling DataFrame.columns=xxxx in distributed environment, a KeyError is raised with message `Cannot find serializable class for type_id 1517314310`.\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py"}], "after_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n 
from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n # make sure operand is registered\n from .set_label import DataFrameSetLabel\n del DataFrameSetLabel\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py"}]} | 970 | 119 |
gh_patches_debug_12109 | rasdani/github-patches | git_diff | Parsl__parsl-2324 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
parsl-visualize creates invalid monitoring.db SQL schema
**Describe the bug**
Hi there, and thanks for your support. I'm using HighThroughputExecutor+SlurmProvider to manage my workflow, and I sometimes encounter an unexpected OperationalError when including a MonitoringHub in my Config, which then breaks the monitoring information and also keeps the main python script hanging. The hanging behavior should be related to https://github.com/Parsl/parsl/pull/1917#discussion_r647487589. More generally, I guess that this issue is related to https://github.com/Parsl/parsl/issues/2123 (and indeed I do sometimes have to stop my scripts with ctrl-c, which may have triggered this behavior).
**To Reproduce**
Unfortunately I don't have a minimal working example to show, since (1) this behavior is not reproducible (sometimes the same script does work correctly, sometimes it doesn't), and (2) parsl only enters in a small part of a more complex script that would be useless to fully describe here. Hopefully some parts of the configuration and the logs (below) can be useful to understand what is going wrong.
To configure parsl, I have some trivial auxiliary functions that define a MonitoringHub, a HighThroughputExecutor and a SlurmProvider, for instance like this one:
```
def define_MonitoringHub(workflow_name=None):
kwargs = dict(
hub_address=address_by_hostname(),
monitoring_debug=True,
resource_monitoring_interval=30,
)
if workflow_name is not None:
kwargs["workflow_name"] = workflow_name
return MonitoringHub(**kwargs)
```
In the main script, these are called as
```
provider = define_SlurmProvider(...)
htex = define_HighThroughputExecutor(provider=provider)
monitoring = define_MonitoringHub(workflow_name="test")
config = Config(executors=[htex], monitoring=monitoring)
parsl.clear()
parsl.load(config)
```
I let my main script run, and it correctly executes a set of `python_app`'s through SLURM (I see them running with `squeue`, and I verify that the output is correct). After all apps executed, the main script remains hanging, and if I check the logs I see in `runinfo/000/database_manager.log` that the script got stuck in an OperationalError loop:
```
$ tail -n 8 runinfo/000/database_manager.log
2022-05-30 10:34:02.599 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked
[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]
[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
2022-05-30 10:34:03.604 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked
[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]
[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
```
where it just prints this error message once per second. I attach the full `database_manager.log`, `monitoring_router.log` and `parsl.log`. You see that `parsl.log` is missing the final "DFK cleanup complete" cleanup line (which is present for cases that run smoothly).
[database_manager.log](https://github.com/Parsl/parsl/files/8798140/database_manager.log)
[monitoring_router.log](https://github.com/Parsl/parsl/files/8798141/monitoring_router.log)
[parsl.log](https://github.com/Parsl/parsl/files/8798142/parsl.log)
After this situation takes place, re-running the same script typically leads to the same behavior, even if I remove `monitoring.db` and the `runinfo` folder (which is puzzling... is there a state of the DFK which somehow remains active after I kill the Python script?), although sometimes I recover the expected correct behavior.
Meanwhile, these "wrong" runs cannot be accessed through `parsl-visualize` (when I select a workflow in the web interface, I either get "500 Internal server error" or "Workflow 903cd6fa-cda7-416d-9ada-bdeb09a030d2 does not have any resource usage records. ") , but this is not surprising given that the database is probably corrupted somewhere.
**Expected behavior**
I expected no OperationalError, as the `task_time_invoked` column should be present.
**Environment**
- OS: OS: Ubuntu 16
- Python version: 3.8.13
- Parsl version: 1.2.0
**Distributed Environment**
- Where are you running the Parsl script from ? Login node
- Where do you need the workers to run ? Compute nodes
(apologies for the non-reproducible issue description, I'll keep trying to make the unexpected behavior more robust)
--- END ISSUE ---
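A quick, Parsl-independent way to confirm the schema mismatch reported above is to inspect the `task` table of the existing database (the path is illustrative):
```python
import sqlite3

conn = sqlite3.connect("monitoring.db")   # path to the affected monitoring database
columns = [row[1] for row in conn.execute("PRAGMA table_info(task)")]
print(columns)
print("task_time_invoked" in columns)     # False matches the OperationalError in the logs
conn.close()
```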
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/monitoring/visualization/models.py`
Content:
```
1 from flask_sqlalchemy import SQLAlchemy
2
3
4 WORKFLOW = 'workflow' # Workflow table includes workflow metadata
5 TASK = 'task' # Task table includes task metadata
6 STATUS = 'status' # Status table includes task status
7 RESOURCE = 'resource' # Resource table includes task resource utilization
8 NODE = 'node' # Node table include node info
9
10 db = SQLAlchemy()
11
12
13 class Workflow(db.Model):
14 __tablename__ = WORKFLOW
15 run_id = db.Column(db.Text, nullable=False, primary_key=True)
16 workflow_name = db.Column(db.Text, nullable=True)
17 workflow_version = db.Column(db.Text, nullable=True)
18 time_began = db.Column(db.DateTime, nullable=False) # Why not date?
19 time_completed = db.Column(db.DateTime)
20 host = db.Column(db.Text, nullable=False)
21 user = db.Column(db.Text, nullable=False)
22 rundir = db.Column(db.Text, nullable=False)
23 tasks_failed_count = db.Column(db.Integer, nullable=False)
24 tasks_completed_count = db.Column(db.Integer, nullable=False)
25
26
27 class Node(db.Model):
28 __tablename__ = NODE
29 id = db.Column('id', db.Integer, nullable=False, primary_key=True, autoincrement=True)
30 run_id = db.Column('run_id', db.Text, nullable=False)
31 hostname = db.Column('hostname', db.Text, nullable=False)
32 cpu_count = db.Column('cpu_count', db.Integer, nullable=False)
33 total_memory = db.Column('total_memory', db.Integer, nullable=False)
34 active = db.Column('active', db.Boolean, nullable=False)
35 worker_count = db.Column('worker_count', db.Integer, nullable=False)
36 python_v = db.Column('python_v', db.Text, nullable=False)
37 reg_time = db.Column('reg_time', db.DateTime, nullable=False)
38
39
40 # TODO: expand to full set of info
41 class Status(db.Model):
42 __tablename__ = STATUS
43 task_id = db.Column(db.Integer, db.ForeignKey(
44 'task.task_id'), nullable=False)
45 task_status_name = db.Column(db.Text, nullable=False)
46 timestamp = db.Column(db.DateTime, nullable=False)
47 run_id = db.Column(db.Text, db.ForeignKey(
48 'workflow.run_id'), nullable=False)
49 __table_args__ = (
50 db.PrimaryKeyConstraint('task_id', 'run_id',
51 'task_status_name', 'timestamp'),
52 )
53
54
55 class Task(db.Model):
56 __tablename__ = TASK
57 task_id = db.Column('task_id', db.Integer, nullable=False)
58 run_id = db.Column('run_id', db.Text, nullable=False)
59 task_func_name = db.Column('task_func_name', db.Text, nullable=False)
60 task_depends = db.Column('task_depends', db.Text, nullable=True)
61 task_time_returned = db.Column(
62 'task_time_returned', db.DateTime, nullable=True)
63 task_memoize = db.Column('task_memoize', db.Text, nullable=False)
64 task_inputs = db.Column('task_inputs', db.Text, nullable=True)
65 task_outputs = db.Column('task_outputs', db.Text, nullable=True)
66 task_stdin = db.Column('task_stdin', db.Text, nullable=True)
67 task_stdout = db.Column('task_stdout', db.Text, nullable=True)
68 task_stderr = db.Column('task_stderr', db.Text, nullable=True)
69 __table_args__ = (
70 db.PrimaryKeyConstraint('task_id', 'run_id'),
71 )
72
73
74 class Resource(db.Model):
75 __tablename__ = RESOURCE
76 task_id = db.Column('task_id', db.Integer, db.ForeignKey(
77 'task.task_id'), nullable=False)
78 timestamp = db.Column('timestamp', db.DateTime, nullable=False)
79 run_id = db.Column('run_id', db.Text, db.ForeignKey(
80 'workflow.run_id'), nullable=False)
81 resource_monitoring_interval = db.Column(
82 'resource_monitoring_interval', db.Float, nullable=True)
83 psutil_process_pid = db.Column(
84 'psutil_process_pid', db.Integer, nullable=True)
85 psutil_process_memory_percent = db.Column(
86 'psutil_process_memory_percent', db.Float, nullable=True)
87 psutil_process_children_count = db.Column(
88 'psutil_process_children_count', db.Float, nullable=True)
89 psutil_process_time_user = db.Column(
90 'psutil_process_time_user', db.Float, nullable=True)
91 psutil_process_time_system = db.Column(
92 'psutil_process_time_system', db.Float, nullable=True)
93 psutil_process_memory_virtual = db.Column(
94 'psutil_process_memory_virtual', db.Float, nullable=True)
95 psutil_process_memory_resident = db.Column(
96 'psutil_process_memory_resident', db.Float, nullable=True)
97 psutil_process_disk_read = db.Column(
98 'psutil_process_disk_read', db.Float, nullable=True)
99 psutil_process_disk_write = db.Column(
100 'psutil_process_disk_write', db.Float, nullable=True)
101 psutil_process_status = db.Column(
102 'psutil_process_status', db.Text, nullable=True)
103 __table_args__ = (
104 db.PrimaryKeyConstraint('task_id', 'run_id', 'timestamp'),)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/monitoring/visualization/models.py b/parsl/monitoring/visualization/models.py
--- a/parsl/monitoring/visualization/models.py
+++ b/parsl/monitoring/visualization/models.py
@@ -58,6 +58,8 @@
run_id = db.Column('run_id', db.Text, nullable=False)
task_func_name = db.Column('task_func_name', db.Text, nullable=False)
task_depends = db.Column('task_depends', db.Text, nullable=True)
+ task_time_invoked = db.Column(
+ 'task_time_invoked', db.DateTime, nullable=True)
task_time_returned = db.Column(
'task_time_returned', db.DateTime, nullable=True)
task_memoize = db.Column('task_memoize', db.Text, nullable=False)
| {"golden_diff": "diff --git a/parsl/monitoring/visualization/models.py b/parsl/monitoring/visualization/models.py\n--- a/parsl/monitoring/visualization/models.py\n+++ b/parsl/monitoring/visualization/models.py\n@@ -58,6 +58,8 @@\n run_id = db.Column('run_id', db.Text, nullable=False)\n task_func_name = db.Column('task_func_name', db.Text, nullable=False)\n task_depends = db.Column('task_depends', db.Text, nullable=True)\n+ task_time_invoked = db.Column(\n+ 'task_time_invoked', db.DateTime, nullable=True)\n task_time_returned = db.Column(\n 'task_time_returned', db.DateTime, nullable=True)\n task_memoize = db.Column('task_memoize', db.Text, nullable=False)\n", "issue": "parsl-visualize creates invalid monitoring.db SQL schema\n**Describe the bug**\r\nHi there, and thanks for your support. I'm using HighThroughputExecutor+SlurmProvider to manage my workflow, and I sometimes encounter an unexpected OperationalError when including a MonitoringHub in my Config, which then breaks the monitoring information and also keeps the main python script hanging. The hanging behavior should be related to https://github.com/Parsl/parsl/pull/1917#discussion_r647487589. More generally, I guess that this issue is related to https://github.com/Parsl/parsl/issues/2123 (and indeed I do sometimes have to stop my scripts with ctrl-c, which may have triggered this behavior).\r\n\r\n**To Reproduce**\r\nUnfortunately I don't have a minimal working example to show, since (1) this behavior is not reproducible (sometimes the same script does work correctly, sometimes it doesn't), and (2) parsl only enters in a small part of a more complex script that would be useless to fully describe here. Hopefully some parts of the configuration and the logs (below) can be useful to understand what is going wrong.\r\n\r\n\r\nTo configure parsl, I have some trivial auxiliary functions that define a MonitoringHub, a HighThroughputExecutor and a SlurmProvider, for instance like this one:\r\n```\r\ndef define_MonitoringHub(workflow_name=None):\r\n kwargs = dict(\r\n hub_address=address_by_hostname(),\r\n monitoring_debug=True,\r\n resource_monitoring_interval=30,\r\n )\r\n if workflow_name is not None:\r\n kwargs[\"workflow_name\"] = workflow_name\r\n return MonitoringHub(**kwargs)\r\n```\r\nIn the main script, these are called as\r\n```\r\n provider = define_SlurmProvider(...)\r\n htex = define_HighThroughputExecutor(provider=provider)\r\n monitoring = define_MonitoringHub(workflow_name=\"test\")\r\n config = Config(executors=[htex], monitoring=monitoring)\r\n parsl.clear()\r\n parsl.load(config)\r\n```\r\n\r\nI let my main script run, and it correctly executes a set of `python_app`'s through SLURM (I see them running with `squeue`, and I verify that the output is correct). After all apps executed, the main script remains hanging, and if I check the logs I see in `runinfo/000/database_manager.log` that the script got stuck in an OperationalError loop:\r\n```\r\n$ tail -n 8 runinfo/000/database_manager.log \r\n2022-05-30 10:34:02.599 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. 
Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked\r\n[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]\r\n[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]\r\n(Background on this error at: http://sqlalche.me/e/13/e3q8)\r\n2022-05-30 10:34:03.604 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked\r\n[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]\r\n[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]\r\n(Background on this error at: http://sqlalche.me/e/13/e3q8)\r\n```\r\nwhere it just prints this error message once per second. I attach the full `database_manager.log`, `monitoring_router.log` and `parsl.log`. You see that `parsl.log` is missing the final \"DFK cleanup complete\" cleanup line (which is present for cases that run smoothly).\r\n\r\n[database_manager.log](https://github.com/Parsl/parsl/files/8798140/database_manager.log)\r\n[monitoring_router.log](https://github.com/Parsl/parsl/files/8798141/monitoring_router.log)\r\n[parsl.log](https://github.com/Parsl/parsl/files/8798142/parsl.log)\r\n\r\n\r\n\r\nAfter this situation takes place, re-running the same script typically leads to the same behavior, even if I remove `monitoring.db` and the `runinfo` folder (which is puzzling.. is there a state of the DFK which somehow remains active after I kill the python script?), although sometimes I recover the expected correct behavior.\r\nMeanwhile, these \"wrong\" runs cannot be accessed through `parsl-visualize` (when I select a workflow in the web interface, I either get \"500 Internal server error\" or \"Workflow 903cd6fa-cda7-416d-9ada-bdeb09a030d2 does not have any resource usage records. \") , but this is not surprising given that the database is probably corrupted somewhere.\r\n\r\n\r\n**Expected behavior**\r\nI expected no OperationalError, as the `task_time_invoked` column should be present.\r\n\r\n**Environment**\r\n - OS: OS: Ubuntu 16\r\n - Python version: 3.8.13\r\n - Parsl version: 1.2.0\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? Login node\r\n- Where do you need the workers to run ? 
Compute nodes\r\n\r\n(apologies for the non-reproducible issue description, I'll keep trying to make the unexpected behavior more robust)\n", "before_files": [{"content": "from flask_sqlalchemy import SQLAlchemy\n\n\nWORKFLOW = 'workflow' # Workflow table includes workflow metadata\nTASK = 'task' # Task table includes task metadata\nSTATUS = 'status' # Status table includes task status\nRESOURCE = 'resource' # Resource table includes task resource utilization\nNODE = 'node' # Node table include node info\n\ndb = SQLAlchemy()\n\n\nclass Workflow(db.Model):\n __tablename__ = WORKFLOW\n run_id = db.Column(db.Text, nullable=False, primary_key=True)\n workflow_name = db.Column(db.Text, nullable=True)\n workflow_version = db.Column(db.Text, nullable=True)\n time_began = db.Column(db.DateTime, nullable=False) # Why not date?\n time_completed = db.Column(db.DateTime)\n host = db.Column(db.Text, nullable=False)\n user = db.Column(db.Text, nullable=False)\n rundir = db.Column(db.Text, nullable=False)\n tasks_failed_count = db.Column(db.Integer, nullable=False)\n tasks_completed_count = db.Column(db.Integer, nullable=False)\n\n\nclass Node(db.Model):\n __tablename__ = NODE\n id = db.Column('id', db.Integer, nullable=False, primary_key=True, autoincrement=True)\n run_id = db.Column('run_id', db.Text, nullable=False)\n hostname = db.Column('hostname', db.Text, nullable=False)\n cpu_count = db.Column('cpu_count', db.Integer, nullable=False)\n total_memory = db.Column('total_memory', db.Integer, nullable=False)\n active = db.Column('active', db.Boolean, nullable=False)\n worker_count = db.Column('worker_count', db.Integer, nullable=False)\n python_v = db.Column('python_v', db.Text, nullable=False)\n reg_time = db.Column('reg_time', db.DateTime, nullable=False)\n\n\n# TODO: expand to full set of info\nclass Status(db.Model):\n __tablename__ = STATUS\n task_id = db.Column(db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n task_status_name = db.Column(db.Text, nullable=False)\n timestamp = db.Column(db.DateTime, nullable=False)\n run_id = db.Column(db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id',\n 'task_status_name', 'timestamp'),\n )\n\n\nclass Task(db.Model):\n __tablename__ = TASK\n task_id = db.Column('task_id', db.Integer, nullable=False)\n run_id = db.Column('run_id', db.Text, nullable=False)\n task_func_name = db.Column('task_func_name', db.Text, nullable=False)\n task_depends = db.Column('task_depends', db.Text, nullable=True)\n task_time_returned = db.Column(\n 'task_time_returned', db.DateTime, nullable=True)\n task_memoize = db.Column('task_memoize', db.Text, nullable=False)\n task_inputs = db.Column('task_inputs', db.Text, nullable=True)\n task_outputs = db.Column('task_outputs', db.Text, nullable=True)\n task_stdin = db.Column('task_stdin', db.Text, nullable=True)\n task_stdout = db.Column('task_stdout', db.Text, nullable=True)\n task_stderr = db.Column('task_stderr', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id'),\n )\n\n\nclass Resource(db.Model):\n __tablename__ = RESOURCE\n task_id = db.Column('task_id', db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n timestamp = db.Column('timestamp', db.DateTime, nullable=False)\n run_id = db.Column('run_id', db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n resource_monitoring_interval = db.Column(\n 'resource_monitoring_interval', db.Float, nullable=True)\n psutil_process_pid = 
db.Column(\n 'psutil_process_pid', db.Integer, nullable=True)\n psutil_process_memory_percent = db.Column(\n 'psutil_process_memory_percent', db.Float, nullable=True)\n psutil_process_children_count = db.Column(\n 'psutil_process_children_count', db.Float, nullable=True)\n psutil_process_time_user = db.Column(\n 'psutil_process_time_user', db.Float, nullable=True)\n psutil_process_time_system = db.Column(\n 'psutil_process_time_system', db.Float, nullable=True)\n psutil_process_memory_virtual = db.Column(\n 'psutil_process_memory_virtual', db.Float, nullable=True)\n psutil_process_memory_resident = db.Column(\n 'psutil_process_memory_resident', db.Float, nullable=True)\n psutil_process_disk_read = db.Column(\n 'psutil_process_disk_read', db.Float, nullable=True)\n psutil_process_disk_write = db.Column(\n 'psutil_process_disk_write', db.Float, nullable=True)\n psutil_process_status = db.Column(\n 'psutil_process_status', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id', 'timestamp'),)\n", "path": "parsl/monitoring/visualization/models.py"}], "after_files": [{"content": "from flask_sqlalchemy import SQLAlchemy\n\n\nWORKFLOW = 'workflow' # Workflow table includes workflow metadata\nTASK = 'task' # Task table includes task metadata\nSTATUS = 'status' # Status table includes task status\nRESOURCE = 'resource' # Resource table includes task resource utilization\nNODE = 'node' # Node table include node info\n\ndb = SQLAlchemy()\n\n\nclass Workflow(db.Model):\n __tablename__ = WORKFLOW\n run_id = db.Column(db.Text, nullable=False, primary_key=True)\n workflow_name = db.Column(db.Text, nullable=True)\n workflow_version = db.Column(db.Text, nullable=True)\n time_began = db.Column(db.DateTime, nullable=False) # Why not date?\n time_completed = db.Column(db.DateTime)\n host = db.Column(db.Text, nullable=False)\n user = db.Column(db.Text, nullable=False)\n rundir = db.Column(db.Text, nullable=False)\n tasks_failed_count = db.Column(db.Integer, nullable=False)\n tasks_completed_count = db.Column(db.Integer, nullable=False)\n\n\nclass Node(db.Model):\n __tablename__ = NODE\n id = db.Column('id', db.Integer, nullable=False, primary_key=True, autoincrement=True)\n run_id = db.Column('run_id', db.Text, nullable=False)\n hostname = db.Column('hostname', db.Text, nullable=False)\n cpu_count = db.Column('cpu_count', db.Integer, nullable=False)\n total_memory = db.Column('total_memory', db.Integer, nullable=False)\n active = db.Column('active', db.Boolean, nullable=False)\n worker_count = db.Column('worker_count', db.Integer, nullable=False)\n python_v = db.Column('python_v', db.Text, nullable=False)\n reg_time = db.Column('reg_time', db.DateTime, nullable=False)\n\n\n# TODO: expand to full set of info\nclass Status(db.Model):\n __tablename__ = STATUS\n task_id = db.Column(db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n task_status_name = db.Column(db.Text, nullable=False)\n timestamp = db.Column(db.DateTime, nullable=False)\n run_id = db.Column(db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id',\n 'task_status_name', 'timestamp'),\n )\n\n\nclass Task(db.Model):\n __tablename__ = TASK\n task_id = db.Column('task_id', db.Integer, nullable=False)\n run_id = db.Column('run_id', db.Text, nullable=False)\n task_func_name = db.Column('task_func_name', db.Text, nullable=False)\n task_depends = db.Column('task_depends', db.Text, nullable=True)\n task_time_invoked = db.Column(\n 
'task_time_invoked', db.DateTime, nullable=True)\n task_time_returned = db.Column(\n 'task_time_returned', db.DateTime, nullable=True)\n task_memoize = db.Column('task_memoize', db.Text, nullable=False)\n task_inputs = db.Column('task_inputs', db.Text, nullable=True)\n task_outputs = db.Column('task_outputs', db.Text, nullable=True)\n task_stdin = db.Column('task_stdin', db.Text, nullable=True)\n task_stdout = db.Column('task_stdout', db.Text, nullable=True)\n task_stderr = db.Column('task_stderr', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id'),\n )\n\n\nclass Resource(db.Model):\n __tablename__ = RESOURCE\n task_id = db.Column('task_id', db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n timestamp = db.Column('timestamp', db.DateTime, nullable=False)\n run_id = db.Column('run_id', db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n resource_monitoring_interval = db.Column(\n 'resource_monitoring_interval', db.Float, nullable=True)\n psutil_process_pid = db.Column(\n 'psutil_process_pid', db.Integer, nullable=True)\n psutil_process_memory_percent = db.Column(\n 'psutil_process_memory_percent', db.Float, nullable=True)\n psutil_process_children_count = db.Column(\n 'psutil_process_children_count', db.Float, nullable=True)\n psutil_process_time_user = db.Column(\n 'psutil_process_time_user', db.Float, nullable=True)\n psutil_process_time_system = db.Column(\n 'psutil_process_time_system', db.Float, nullable=True)\n psutil_process_memory_virtual = db.Column(\n 'psutil_process_memory_virtual', db.Float, nullable=True)\n psutil_process_memory_resident = db.Column(\n 'psutil_process_memory_resident', db.Float, nullable=True)\n psutil_process_disk_read = db.Column(\n 'psutil_process_disk_read', db.Float, nullable=True)\n psutil_process_disk_write = db.Column(\n 'psutil_process_disk_write', db.Float, nullable=True)\n psutil_process_status = db.Column(\n 'psutil_process_status', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id', 'timestamp'),)\n", "path": "parsl/monitoring/visualization/models.py"}]} | 3,042 | 182 |
gh_patches_debug_60446 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update lasso example
**Description**
The lasso example references heat.ml.regression.lasso
**To Reproduce**
Steps to reproduce the behavior:
1. Which module/class/function is affected?
examples/lasso/demo.py
2. What are the circumstances under which the bug appears?
run demo.py
3. What is the exact error message/erroneous behaviour?
`Traceback (most recent call last):
File "demo.py", line 10, in <module>
import heat.ml.regression.lasso as lasso
ModuleNotFoundError: No module named 'heat.ml'`
**Expected behavior**
Should import heat.core.regression.lasso
**Version Info**
Which version are you using?
0.2.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/lasso/demo.py`
Content:
```
1 import numpy as np
2 import torch
3 import sys
4
5 sys.path.append("../../")
6
7 import heat as ht
8 from matplotlib import pyplot as plt
9 from sklearn import datasets
10 import heat.ml.regression.lasso as lasso
11 import plotfkt
12
13 # read scikit diabetes data set
14 diabetes = datasets.load_diabetes()
15
16 # load diabetes dataset from hdf5 file
17 X = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="x", split=0)
18 y = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="y", split=0)
19
20 # normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved
21 X = X / ht.sqrt((ht.mean(X ** 2, axis=0)))
22
23 # HeAT lasso instance
24 estimator = lasso.HeatLasso(max_iter=100)
25
26 # List lasso model parameters
27 theta_list = list()
28
29 # Range of lambda values
30 lamda = np.logspace(0, 4, 10) / 10
31
32 # compute the lasso path
33 for l in lamda:
34 estimator.lam = l
35 estimator.fit(X, y)
36 theta_list.append(estimator.theta.numpy().flatten())
37
38 # Stack estimated model parameters into one numpy array
39 theta_lasso = np.stack(theta_list).T
40
41 # Stack into numpy array
42 theta_lasso = np.stack(theta_list).T[1:, :]
43
44
45 # plot lasso paths
46 plt.subplot(3, 1, 1)
47 plotfkt.plot_lasso_path(
48 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - HeAT implementation"
49 )
50
51 if X.is_distributed():
52 distributed = X.comm.rank
53 else:
54 distributed = False
55
56 # Now the same stuff in numpy
57 X = diabetes.data.astype("float32")
58 y = diabetes.target.astype("float32")
59
60 m, _ = X.shape
61 X = np.concatenate((np.ones((m, 1)).astype("float32"), X), axis=1)
62
63 # normalize dataset
64 X = X / np.sqrt((np.mean(X ** 2, axis=0)))
65
66 # Numpy lasso instance
67 estimator = lasso.NumpyLasso(max_iter=100)
68
69 # List lasso model parameters
70 theta_list = list()
71
72 # Range of lambda values
73 lamda = np.logspace(0, 4, 10) / 10
74
75 # compute the lasso path
76 for l in lamda:
77 estimator.lam = l
78 estimator.fit(X, y)
79 theta_list.append(estimator.theta.flatten())
80
81 # Stack estimated model parameters into one numpy array
82 theta_lasso = np.stack(theta_list).T
83
84 # Stack into numpy array
85 theta_lasso = np.stack(theta_list).T[1:, :]
86
87 # plot lasso paths
88 plt.subplot(3, 1, 2)
89 plotfkt.plot_lasso_path(
90 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - Numpy implementation"
91 )
92
93 # Now the same stuff again in PyTorch
94 X = torch.tensor(X)
95 y = torch.tensor(y)
96
97 # HeAT lasso instance
98 estimator = lasso.PytorchLasso(max_iter=100)
99
100 # List lasso model parameters
101 theta_list = list()
102
103 # Range of lambda values
104 lamda = np.logspace(0, 4, 10) / 10
105
106 # compute the lasso path
107 for l in lamda:
108 estimator.lam = l
109 estimator.fit(X, y)
110 theta_list.append(estimator.theta.numpy().flatten())
111
112 # Stack estimated model parameters into one numpy array
113 theta_lasso = np.stack(theta_list).T
114
115 # Stack into numpy array
116 theta_lasso = np.stack(theta_list).T[1:, :]
117
118 # plot lasso paths
119 plt.subplot(3, 1, 3)
120 plotfkt.plot_lasso_path(
121 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - PyTorch implementation"
122 )
123
124 # plot only with first rank
125 if distributed is False:
126 plt.show()
127 elif distributed == 0:
128 plt.show()
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/lasso/demo.py b/examples/lasso/demo.py
--- a/examples/lasso/demo.py
+++ b/examples/lasso/demo.py
@@ -7,7 +7,7 @@
import heat as ht
from matplotlib import pyplot as plt
from sklearn import datasets
-import heat.ml.regression.lasso as lasso
+import heat.core.regression.lasso as lasso
import plotfkt
# read scikit diabetes data set
| {"golden_diff": "diff --git a/examples/lasso/demo.py b/examples/lasso/demo.py\n--- a/examples/lasso/demo.py\n+++ b/examples/lasso/demo.py\n@@ -7,7 +7,7 @@\n import heat as ht\n from matplotlib import pyplot as plt\n from sklearn import datasets\n-import heat.ml.regression.lasso as lasso\n+import heat.core.regression.lasso as lasso\n import plotfkt\n \n # read scikit diabetes data set\n", "issue": "Update lasso example\n**Description**\r\nThe lasso example references heat.ml.regression.lasso\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Which module/class/function is affected?\r\n examples/lasso/demo.py\r\n2. What are the circumstances under which the bug appears?\r\n run demo.py\r\n3. What is the exact error-message/errorous behavious?\r\n `Traceback (most recent call last):\r\n File \"demo.py\", line 10, in <module>\r\n import heat.ml.regression.lasso as lasso\r\nModuleNotFoundError: No module named 'heat.ml'`\r\n\r\n**Expected behavior**\r\nShould import heat.core.regression.lasso\r\n\r\n**Version Info**\r\n Which version are you using?\r\n0.2.1\r\n\nUpdate lasso example\n**Description**\r\nThe lasso example references heat.ml.regression.lasso\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Which module/class/function is affected?\r\n examples/lasso/demo.py\r\n2. What are the circumstances under which the bug appears?\r\n run demo.py\r\n3. What is the exact error-message/errorous behavious?\r\n `Traceback (most recent call last):\r\n File \"demo.py\", line 10, in <module>\r\n import heat.ml.regression.lasso as lasso\r\nModuleNotFoundError: No module named 'heat.ml'`\r\n\r\n**Expected behavior**\r\nShould import heat.core.regression.lasso\r\n\r\n**Version Info**\r\n Which version are you using?\r\n0.2.1\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\nimport sys\n\nsys.path.append(\"../../\")\n\nimport heat as ht\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport heat.ml.regression.lasso as lasso\nimport plotfkt\n\n# read scikit diabetes data set\ndiabetes = datasets.load_diabetes()\n\n# load diabetes dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"x\", split=0)\ny = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"y\", split=0)\n\n# normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\nX = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\n\n# HeAT lasso instance\nestimator = lasso.HeatLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n\n# plot lasso paths\nplt.subplot(3, 1, 1)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - HeAT implementation\"\n)\n\nif X.is_distributed():\n distributed = X.comm.rank\nelse:\n distributed = False\n\n# Now the same stuff in numpy\nX = diabetes.data.astype(\"float32\")\ny = diabetes.target.astype(\"float32\")\n\nm, _ = X.shape\nX = np.concatenate((np.ones((m, 1)).astype(\"float32\"), X), axis=1)\n\n# normalize dataset\nX = X / np.sqrt((np.mean(X ** 2, axis=0)))\n\n# Numpy lasso instance\nestimator = lasso.NumpyLasso(max_iter=100)\n\n# List 
lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 2)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - Numpy implementation\"\n)\n\n# Now the same stuff again in PyTorch\nX = torch.tensor(X)\ny = torch.tensor(y)\n\n# HeAT lasso instance\nestimator = lasso.PytorchLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 3)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - PyTorch implementation\"\n)\n\n# plot only with first rank\nif distributed is False:\n plt.show()\nelif distributed == 0:\n plt.show()\n", "path": "examples/lasso/demo.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nimport sys\n\nsys.path.append(\"../../\")\n\nimport heat as ht\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport heat.core.regression.lasso as lasso\nimport plotfkt\n\n# read scikit diabetes data set\ndiabetes = datasets.load_diabetes()\n\n# load diabetes dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"x\", split=0)\ny = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"y\", split=0)\n\n# normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\nX = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\n\n# HeAT lasso instance\nestimator = lasso.HeatLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n\n# plot lasso paths\nplt.subplot(3, 1, 1)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - HeAT implementation\"\n)\n\nif X.is_distributed():\n distributed = X.comm.rank\nelse:\n distributed = False\n\n# Now the same stuff in numpy\nX = diabetes.data.astype(\"float32\")\ny = diabetes.target.astype(\"float32\")\n\nm, _ = X.shape\nX = np.concatenate((np.ones((m, 1)).astype(\"float32\"), X), axis=1)\n\n# normalize dataset\nX = X / np.sqrt((np.mean(X ** 2, axis=0)))\n\n# Numpy lasso instance\nestimator = lasso.NumpyLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.flatten())\n\n# Stack estimated model parameters into one numpy 
array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 2)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - Numpy implementation\"\n)\n\n# Now the same stuff again in PyTorch\nX = torch.tensor(X)\ny = torch.tensor(y)\n\n# HeAT lasso instance\nestimator = lasso.PytorchLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 3)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - PyTorch implementation\"\n)\n\n# plot only with first rank\nif distributed is False:\n plt.show()\nelif distributed == 0:\n plt.show()\n", "path": "examples/lasso/demo.py"}]} | 1,790 | 101 |
gh_patches_debug_9807 | rasdani/github-patches | git_diff | Mailu__Mailu-2404 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't set up a forward to a short-named email address.
### Environment
- docker-compose
### Versions
```
docker ps -a | grep mailu/admin
f14b60868ade mailu/admin:1.9 "/bin/sh -c /start.py" 7 weeks ago Up 7 weeks (healthy) 80/tcp mailu-admin-1
```
## Description
A user has an email address like [email protected]. I can't set up a forward to this address via the WebUI or CLI.
## Replication Steps
Set up a forward for a user to an email address like [email protected]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/ui/forms.py`
Content:
```
1 from wtforms import validators, fields, widgets
2 from wtforms_components import fields as fields_
3 from flask_babel import lazy_gettext as _
4
5 import flask_login
6 import flask_wtf
7 import re
8
9 LOCALPART_REGEX = "^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*$"
10
11 class DestinationField(fields.SelectMultipleField):
12 """ Allow for multiple emails selection from current user choices and
13 additional email addresses.
14 """
15
16 validator = re.compile(r'^.+@([^.@][^@]+)$', re.IGNORECASE)
17
18 def iter_choices(self):
19 managed = [
20 str(email)
21 for email in flask_login.current_user.get_managed_emails()
22 ]
23 for email in managed:
24 selected = self.data is not None and self.coerce(email) in self.data
25 yield (email, email, selected)
26 for email in self.data or ():
27 if email not in managed:
28 yield (email, email, True)
29
30 def pre_validate(self, form):
31 for item in self.data:
32 if not self.validator.match(item):
33 raise validators.ValidationError(_('Invalid email address.'))
34
35 class MultipleEmailAddressesVerify(object):
36 def __init__(self,message=_('Invalid email address.')):
37 self.message = message
38
39 def __call__(self, form, field):
40 pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
41 if not pattern.match(field.data.replace(" ", "")):
42 raise validators.ValidationError(self.message)
43
44 class ConfirmationForm(flask_wtf.FlaskForm):
45 submit = fields.SubmitField(_('Confirm'))
46
47 class DomainForm(flask_wtf.FlaskForm):
48 name = fields.StringField(_('Domain name'), [validators.DataRequired()])
49 max_users = fields_.IntegerField(_('Maximum user count'), [validators.NumberRange(min=-1)], default=10)
50 max_aliases = fields_.IntegerField(_('Maximum alias count'), [validators.NumberRange(min=-1)], default=10)
51 max_quota_bytes = fields_.IntegerSliderField(_('Maximum user quota'), default=0)
52 signup_enabled = fields.BooleanField(_('Enable sign-up'), default=False)
53 comment = fields.StringField(_('Comment'))
54 submit = fields.SubmitField(_('Save'))
55
56
57 class DomainSignupForm(flask_wtf.FlaskForm):
58 name = fields.StringField(_('Domain name'), [validators.DataRequired()])
59 localpart = fields.StringField(_('Initial admin'), [validators.DataRequired()])
60 pw = fields.PasswordField(_('Admin password'), [validators.DataRequired()])
61 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
62 captcha = flask_wtf.RecaptchaField()
63 submit = fields.SubmitField(_('Create'))
64
65
66 class AlternativeForm(flask_wtf.FlaskForm):
67 name = fields.StringField(_('Alternative name'), [validators.DataRequired()])
68 submit = fields.SubmitField(_('Save'))
69
70
71 class RelayForm(flask_wtf.FlaskForm):
72 name = fields.StringField(_('Relayed domain name'), [validators.DataRequired()])
73 smtp = fields.StringField(_('Remote host'))
74 comment = fields.StringField(_('Comment'))
75 submit = fields.SubmitField(_('Save'))
76
77
78 class UserForm(flask_wtf.FlaskForm):
79 localpart = fields.StringField(_('E-mail'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
80 pw = fields.PasswordField(_('Password'))
81 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
82 quota_bytes = fields_.IntegerSliderField(_('Quota'), default=10**9)
83 enable_imap = fields.BooleanField(_('Allow IMAP access'), default=True)
84 enable_pop = fields.BooleanField(_('Allow POP3 access'), default=True)
85 displayed_name = fields.StringField(_('Displayed name'))
86 comment = fields.StringField(_('Comment'))
87 enabled = fields.BooleanField(_('Enabled'), default=True)
88 submit = fields.SubmitField(_('Save'))
89
90
91 class UserSignupForm(flask_wtf.FlaskForm):
92 localpart = fields.StringField(_('Email address'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
93 pw = fields.PasswordField(_('Password'), [validators.DataRequired()])
94 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
95 submit = fields.SubmitField(_('Sign up'))
96
97 class UserSignupFormCaptcha(UserSignupForm):
98 captcha = flask_wtf.RecaptchaField()
99
100 class UserSettingsForm(flask_wtf.FlaskForm):
101 displayed_name = fields.StringField(_('Displayed name'))
102 spam_enabled = fields.BooleanField(_('Enable spam filter'))
103 spam_mark_as_read = fields.BooleanField(_('Enable marking spam mails as read'))
104 spam_threshold = fields_.IntegerSliderField(_('Spam filter tolerance'))
105 forward_enabled = fields.BooleanField(_('Enable forwarding'))
106 forward_keep = fields.BooleanField(_('Keep a copy of the emails'))
107 forward_destination = fields.StringField(_('Destination'), [validators.Optional(), MultipleEmailAddressesVerify()])
108 submit = fields.SubmitField(_('Save settings'))
109
110
111 class UserPasswordForm(flask_wtf.FlaskForm):
112 pw = fields.PasswordField(_('Password'), [validators.DataRequired()])
113 pw2 = fields.PasswordField(_('Password check'), [validators.DataRequired()])
114 submit = fields.SubmitField(_('Update password'))
115
116
117 class UserReplyForm(flask_wtf.FlaskForm):
118 reply_enabled = fields.BooleanField(_('Enable automatic reply'))
119 reply_subject = fields.StringField(_('Reply subject'))
120 reply_body = fields.StringField(_('Reply body'),
121 widget=widgets.TextArea())
122 reply_startdate = fields.html5.DateField(_('Start of vacation'))
123 reply_enddate = fields.html5.DateField(_('End of vacation'))
124 submit = fields.SubmitField(_('Update'))
125
126
127 class TokenForm(flask_wtf.FlaskForm):
128 displayed_password = fields.StringField(
129 _('Your token (write it down, as it will never be displayed again)')
130 )
131 raw_password = fields.HiddenField([validators.DataRequired()])
132 comment = fields.StringField(_('Comment'))
133 ip = fields.StringField(
134 _('Authorized IP'), [validators.Optional(), validators.IPAddress(ipv6=True)]
135 )
136 submit = fields.SubmitField(_('Save'))
137
138
139 class AliasForm(flask_wtf.FlaskForm):
140 localpart = fields.StringField(_('Alias'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
141 wildcard = fields.BooleanField(
142 _('Use SQL LIKE Syntax (e.g. for catch-all aliases)'))
143 destination = DestinationField(_('Destination'))
144 comment = fields.StringField(_('Comment'))
145 submit = fields.SubmitField(_('Save'))
146
147
148 class AdminForm(flask_wtf.FlaskForm):
149 admin = fields.SelectField(_('Admin email'), choices=[])
150 submit = fields.SubmitField(_('Submit'))
151
152
153 class ManagerForm(flask_wtf.FlaskForm):
154 manager = fields.SelectField(_('Manager email'))
155 submit = fields.SubmitField(_('Submit'))
156
157
158 class FetchForm(flask_wtf.FlaskForm):
159 protocol = fields.SelectField(_('Protocol'), choices=[
160 ('imap', 'IMAP'), ('pop3', 'POP3')
161 ])
162 host = fields.StringField(_('Hostname or IP'), [validators.DataRequired()])
163 port = fields.IntegerField(_('TCP port'), [validators.DataRequired(), validators.NumberRange(min=0, max=65535)])
164 tls = fields.BooleanField(_('Enable TLS'))
165 username = fields.StringField(_('Username'), [validators.DataRequired()])
166 password = fields.PasswordField(_('Password'))
167 keep = fields.BooleanField(_('Keep emails on the server'))
168 submit = fields.SubmitField(_('Submit'))
169
170
171 class AnnouncementForm(flask_wtf.FlaskForm):
172 announcement_subject = fields.StringField(_('Announcement subject'),
173 [validators.DataRequired()])
174 announcement_body = fields.StringField(_('Announcement body'),
175 [validators.DataRequired()], widget=widgets.TextArea())
176 submit = fields.SubmitField(_('Send'))
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -37,7 +37,7 @@
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| {"golden_diff": "diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py\n--- a/core/admin/mailu/ui/forms.py\n+++ b/core/admin/mailu/ui/forms.py\n@@ -37,7 +37,7 @@\n self.message = message\n \n def __call__(self, form, field):\n- pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,}))*$')\n+ pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{1,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{2,}))*$')\n if not pattern.match(field.data.replace(\" \", \"\")):\n raise validators.ValidationError(self.message)\n", "issue": "Cant setup forward to short-named email address. \n### Environment\r\n - docker-compose\r\n\r\n\r\n### Versions\r\n```\r\ndocker ps -a | grep mailu/admin\r\nf14b60868ade mailu/admin:1.9 \"/bin/sh -c /start.py\" 7 weeks ago Up 7 weeks (healthy) 80/tcp mailu-admin-1\r\n```\r\n\r\n## Description\r\nUser have email-address like [email protected] . I cant setup forward to this email via WebUI or CLI\r\n\r\n## Replication Steps\r\nSetup forward for user to email like [email protected] \r\n\n", "before_files": [{"content": "from wtforms import validators, fields, widgets\nfrom wtforms_components import fields as fields_\nfrom flask_babel import lazy_gettext as _\n\nimport flask_login\nimport flask_wtf\nimport re\n\nLOCALPART_REGEX = \"^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*$\"\n\nclass DestinationField(fields.SelectMultipleField):\n \"\"\" Allow for multiple emails selection from current user choices and\n additional email addresses.\n \"\"\"\n\n validator = re.compile(r'^.+@([^.@][^@]+)$', re.IGNORECASE)\n\n def iter_choices(self):\n managed = [\n str(email)\n for email in flask_login.current_user.get_managed_emails()\n ]\n for email in managed:\n selected = self.data is not None and self.coerce(email) in self.data\n yield (email, email, selected)\n for email in self.data or ():\n if email not in managed:\n yield (email, email, True)\n\n def pre_validate(self, form):\n for item in self.data:\n if not self.validator.match(item):\n raise validators.ValidationError(_('Invalid email address.'))\n\nclass MultipleEmailAddressesVerify(object):\n def __init__(self,message=_('Invalid email address.')):\n self.message = message\n\n def __call__(self, form, field):\n pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,}))*$')\n if not pattern.match(field.data.replace(\" \", \"\")):\n raise validators.ValidationError(self.message)\n\nclass ConfirmationForm(flask_wtf.FlaskForm):\n submit = fields.SubmitField(_('Confirm'))\n\nclass DomainForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n max_users = fields_.IntegerField(_('Maximum user count'), [validators.NumberRange(min=-1)], default=10)\n max_aliases = fields_.IntegerField(_('Maximum alias count'), [validators.NumberRange(min=-1)], default=10)\n max_quota_bytes = fields_.IntegerSliderField(_('Maximum user quota'), default=0)\n signup_enabled = fields.BooleanField(_('Enable sign-up'), default=False)\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass DomainSignupForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n localpart = fields.StringField(_('Initial admin'), 
[validators.DataRequired()])\n pw = fields.PasswordField(_('Admin password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n captcha = flask_wtf.RecaptchaField()\n submit = fields.SubmitField(_('Create'))\n\n\nclass AlternativeForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Alternative name'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Save'))\n\n\nclass RelayForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Relayed domain name'), [validators.DataRequired()])\n smtp = fields.StringField(_('Remote host'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('E-mail'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'))\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n quota_bytes = fields_.IntegerSliderField(_('Quota'), default=10**9)\n enable_imap = fields.BooleanField(_('Allow IMAP access'), default=True)\n enable_pop = fields.BooleanField(_('Allow POP3 access'), default=True)\n displayed_name = fields.StringField(_('Displayed name'))\n comment = fields.StringField(_('Comment'))\n enabled = fields.BooleanField(_('Enabled'), default=True)\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserSignupForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Email address'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n submit = fields.SubmitField(_('Sign up'))\n\nclass UserSignupFormCaptcha(UserSignupForm):\n captcha = flask_wtf.RecaptchaField()\n\nclass UserSettingsForm(flask_wtf.FlaskForm):\n displayed_name = fields.StringField(_('Displayed name'))\n spam_enabled = fields.BooleanField(_('Enable spam filter'))\n spam_mark_as_read = fields.BooleanField(_('Enable marking spam mails as read'))\n spam_threshold = fields_.IntegerSliderField(_('Spam filter tolerance'))\n forward_enabled = fields.BooleanField(_('Enable forwarding'))\n forward_keep = fields.BooleanField(_('Keep a copy of the emails'))\n forward_destination = fields.StringField(_('Destination'), [validators.Optional(), MultipleEmailAddressesVerify()])\n submit = fields.SubmitField(_('Save settings'))\n\n\nclass UserPasswordForm(flask_wtf.FlaskForm):\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Password check'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Update password'))\n\n\nclass UserReplyForm(flask_wtf.FlaskForm):\n reply_enabled = fields.BooleanField(_('Enable automatic reply'))\n reply_subject = fields.StringField(_('Reply subject'))\n reply_body = fields.StringField(_('Reply body'),\n widget=widgets.TextArea())\n reply_startdate = fields.html5.DateField(_('Start of vacation'))\n reply_enddate = fields.html5.DateField(_('End of vacation'))\n submit = fields.SubmitField(_('Update'))\n\n\nclass TokenForm(flask_wtf.FlaskForm):\n displayed_password = fields.StringField(\n _('Your token (write it down, as it will never be displayed again)')\n )\n raw_password = fields.HiddenField([validators.DataRequired()])\n comment = fields.StringField(_('Comment'))\n ip = fields.StringField(\n _('Authorized IP'), [validators.Optional(), validators.IPAddress(ipv6=True)]\n )\n submit = 
fields.SubmitField(_('Save'))\n\n\nclass AliasForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Alias'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n wildcard = fields.BooleanField(\n _('Use SQL LIKE Syntax (e.g. for catch-all aliases)'))\n destination = DestinationField(_('Destination'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass AdminForm(flask_wtf.FlaskForm):\n admin = fields.SelectField(_('Admin email'), choices=[])\n submit = fields.SubmitField(_('Submit'))\n\n\nclass ManagerForm(flask_wtf.FlaskForm):\n manager = fields.SelectField(_('Manager email'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass FetchForm(flask_wtf.FlaskForm):\n protocol = fields.SelectField(_('Protocol'), choices=[\n ('imap', 'IMAP'), ('pop3', 'POP3')\n ])\n host = fields.StringField(_('Hostname or IP'), [validators.DataRequired()])\n port = fields.IntegerField(_('TCP port'), [validators.DataRequired(), validators.NumberRange(min=0, max=65535)])\n tls = fields.BooleanField(_('Enable TLS'))\n username = fields.StringField(_('Username'), [validators.DataRequired()])\n password = fields.PasswordField(_('Password'))\n keep = fields.BooleanField(_('Keep emails on the server'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass AnnouncementForm(flask_wtf.FlaskForm):\n announcement_subject = fields.StringField(_('Announcement subject'),\n [validators.DataRequired()])\n announcement_body = fields.StringField(_('Announcement body'),\n [validators.DataRequired()], widget=widgets.TextArea())\n submit = fields.SubmitField(_('Send'))\n", "path": "core/admin/mailu/ui/forms.py"}], "after_files": [{"content": "from wtforms import validators, fields, widgets\nfrom wtforms_components import fields as fields_\nfrom flask_babel import lazy_gettext as _\n\nimport flask_login\nimport flask_wtf\nimport re\n\nLOCALPART_REGEX = \"^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*$\"\n\nclass DestinationField(fields.SelectMultipleField):\n \"\"\" Allow for multiple emails selection from current user choices and\n additional email addresses.\n \"\"\"\n\n validator = re.compile(r'^.+@([^.@][^@]+)$', re.IGNORECASE)\n\n def iter_choices(self):\n managed = [\n str(email)\n for email in flask_login.current_user.get_managed_emails()\n ]\n for email in managed:\n selected = self.data is not None and self.coerce(email) in self.data\n yield (email, email, selected)\n for email in self.data or ():\n if email not in managed:\n yield (email, email, True)\n\n def pre_validate(self, form):\n for item in self.data:\n if not self.validator.match(item):\n raise validators.ValidationError(_('Invalid email address.'))\n\nclass MultipleEmailAddressesVerify(object):\n def __init__(self,message=_('Invalid email address.')):\n self.message = message\n\n def __call__(self, form, field):\n pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{1,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{2,}))*$')\n if not pattern.match(field.data.replace(\" \", \"\")):\n raise validators.ValidationError(self.message)\n\nclass ConfirmationForm(flask_wtf.FlaskForm):\n submit = fields.SubmitField(_('Confirm'))\n\nclass DomainForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n max_users = fields_.IntegerField(_('Maximum user count'), [validators.NumberRange(min=-1)], default=10)\n max_aliases = fields_.IntegerField(_('Maximum alias count'), 
[validators.NumberRange(min=-1)], default=10)\n max_quota_bytes = fields_.IntegerSliderField(_('Maximum user quota'), default=0)\n signup_enabled = fields.BooleanField(_('Enable sign-up'), default=False)\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass DomainSignupForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n localpart = fields.StringField(_('Initial admin'), [validators.DataRequired()])\n pw = fields.PasswordField(_('Admin password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n captcha = flask_wtf.RecaptchaField()\n submit = fields.SubmitField(_('Create'))\n\n\nclass AlternativeForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Alternative name'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Save'))\n\n\nclass RelayForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Relayed domain name'), [validators.DataRequired()])\n smtp = fields.StringField(_('Remote host'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('E-mail'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'))\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n quota_bytes = fields_.IntegerSliderField(_('Quota'), default=10**9)\n enable_imap = fields.BooleanField(_('Allow IMAP access'), default=True)\n enable_pop = fields.BooleanField(_('Allow POP3 access'), default=True)\n displayed_name = fields.StringField(_('Displayed name'))\n comment = fields.StringField(_('Comment'))\n enabled = fields.BooleanField(_('Enabled'), default=True)\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserSignupForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Email address'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n submit = fields.SubmitField(_('Sign up'))\n\nclass UserSignupFormCaptcha(UserSignupForm):\n captcha = flask_wtf.RecaptchaField()\n\nclass UserSettingsForm(flask_wtf.FlaskForm):\n displayed_name = fields.StringField(_('Displayed name'))\n spam_enabled = fields.BooleanField(_('Enable spam filter'))\n spam_mark_as_read = fields.BooleanField(_('Enable marking spam mails as read'))\n spam_threshold = fields_.IntegerSliderField(_('Spam filter tolerance'))\n forward_enabled = fields.BooleanField(_('Enable forwarding'))\n forward_keep = fields.BooleanField(_('Keep a copy of the emails'))\n forward_destination = fields.StringField(_('Destination'), [validators.Optional(), MultipleEmailAddressesVerify()])\n submit = fields.SubmitField(_('Save settings'))\n\n\nclass UserPasswordForm(flask_wtf.FlaskForm):\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Password check'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Update password'))\n\n\nclass UserReplyForm(flask_wtf.FlaskForm):\n reply_enabled = fields.BooleanField(_('Enable automatic reply'))\n reply_subject = fields.StringField(_('Reply subject'))\n reply_body = fields.StringField(_('Reply body'),\n widget=widgets.TextArea())\n reply_startdate = fields.html5.DateField(_('Start of vacation'))\n reply_enddate = 
fields.html5.DateField(_('End of vacation'))\n submit = fields.SubmitField(_('Update'))\n\n\nclass TokenForm(flask_wtf.FlaskForm):\n displayed_password = fields.StringField(\n _('Your token (write it down, as it will never be displayed again)')\n )\n raw_password = fields.HiddenField([validators.DataRequired()])\n comment = fields.StringField(_('Comment'))\n ip = fields.StringField(\n _('Authorized IP'), [validators.Optional(), validators.IPAddress(ipv6=True)]\n )\n submit = fields.SubmitField(_('Save'))\n\n\nclass AliasForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Alias'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n wildcard = fields.BooleanField(\n _('Use SQL LIKE Syntax (e.g. for catch-all aliases)'))\n destination = DestinationField(_('Destination'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass AdminForm(flask_wtf.FlaskForm):\n admin = fields.SelectField(_('Admin email'), choices=[])\n submit = fields.SubmitField(_('Submit'))\n\n\nclass ManagerForm(flask_wtf.FlaskForm):\n manager = fields.SelectField(_('Manager email'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass FetchForm(flask_wtf.FlaskForm):\n protocol = fields.SelectField(_('Protocol'), choices=[\n ('imap', 'IMAP'), ('pop3', 'POP3')\n ])\n host = fields.StringField(_('Hostname or IP'), [validators.DataRequired()])\n port = fields.IntegerField(_('TCP port'), [validators.DataRequired(), validators.NumberRange(min=0, max=65535)])\n tls = fields.BooleanField(_('Enable TLS'))\n username = fields.StringField(_('Username'), [validators.DataRequired()])\n password = fields.PasswordField(_('Password'))\n keep = fields.BooleanField(_('Keep emails on the server'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass AnnouncementForm(flask_wtf.FlaskForm):\n announcement_subject = fields.StringField(_('Announcement subject'),\n [validators.DataRequired()])\n announcement_body = fields.StringField(_('Announcement body'),\n [validators.DataRequired()], widget=widgets.TextArea())\n submit = fields.SubmitField(_('Send'))\n", "path": "core/admin/mailu/ui/forms.py"}]} | 2,548 | 280 |
gh_patches_debug_18150 | rasdani/github-patches | git_diff | sunpy__sunpy-4846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix CROTA in EUVI map source
e.g.
```python
from sunpy.map import Map
smap = Map('https://stereo-ssc.nascom.nasa.gov/pub/beacon/ahead/secchi/img/euvi/20210107/20210107_001615_n7euA.fts')
print(smap.wcs)
```
raises
```
WARNING: FITSFixedWarning: CROTA = -6.39331135705
keyword looks very much like CROTAn but isn't. [astropy.wcs.wcs]
```
It would be good to fix this in the EUVI map source.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/sources/stereo.py`
Content:
```
1 """STEREO Map subclass definitions"""
2
3 __author__ = "Keith Hughitt"
4 __email__ = "[email protected]"
5
6
7 import astropy.units as u
8 from astropy.visualization import PowerStretch
9 from astropy.visualization.mpl_normalize import ImageNormalize
10
11 from sunpy.map import GenericMap
12 from sunpy.map.sources.source_type import source_stretch
13
14 __all__ = ['EUVIMap', 'CORMap', 'HIMap']
15
16
17 class EUVIMap(GenericMap):
18 """STEREO-SECCHI EUVI Image Map
19
20 EUVI is an extreme ultraviolet (EUV) imager. Part of the STEREO-SECCHI
21 suite it observes the Sun from 1 to 1.7 solar radii. It is capable of
22 observing at 304 (He II), 171 (Fe IX), 195 (Fe XII), and 284 (Fe XV)
23 Angstroms.
24
25 References
26 ----------
27 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
28 * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_
29 * `Instrument Page <http://secchi.lmsal.com/EUVI/>`_
30 """
31
32 def __init__(self, data, header, **kwargs):
33
34 GenericMap.__init__(self, data, header, **kwargs)
35 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
36 self.plot_settings['cmap'] = 'sohoeit{wl:d}'.format(wl=int(self.wavelength.value))
37 self.plot_settings['norm'] = ImageNormalize(
38 stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)
39 self.meta['waveunit'] = 'Angstrom'
40
41 # Try to identify when the FITS meta data does not have the correct
42 # date FITS keyword
43 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
44 self.meta['date-obs'] = self.meta['date_obs']
45
46 @property
47 def rsun_arcseconds(self):
48 """
49 Radius of the sun in arcseconds.
50
51 References
52 ----------
53 https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf
54 """
55 return self.meta.get('rsun', None)
56
57 @property
58 def rsun_obs(self):
59 """
60 Radius of the sun in arcseconds as a quantity.
61
62 References
63 ----------
64 https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf
65 """
66 rsun_arcseconds = self.meta.get('rsun', None)
67
68 if rsun_arcseconds is None:
69 rsun_arcseconds = super().rsun_obs
70
71 return u.Quantity(rsun_arcseconds, 'arcsec')
72
73 @classmethod
74 def is_datasource_for(cls, data, header, **kwargs):
75 """Determines if header corresponds to an EUVI image"""
76 return header.get('detector') == 'EUVI'
77
78
79 class CORMap(GenericMap):
80 """STEREO-SECCHI CORonograph Image Map.
81
82 Part of the STEREO-SECCHI suite of remote sensing telescopes,
83 COR is a set of two coronographs (COR1, COR2) onboard STEREO.
84 They are both traditional Lyot coronagraphs.
85
86 The COR1 detectors observes from 1.3 to 4 solar radii while the
87 COR2 detectors observe a range from 2 to 15 solar radii.
88
89 References
90 ----------
91 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
92 * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_
93 * `COR1 Instrument Page <https://cor1.gsfc.nasa.gov>`_
94 * `COR2 Instrument Page <http://secchi.nrl.navy.mil/index.php?p=cor2>`_
95 * `COR1 User Guide <https://cor1.gsfc.nasa.gov/guide/>`_
96 """
97
98 def __init__(self, data, header, **kwargs):
99
100 GenericMap.__init__(self, data, header, **kwargs)
101
102 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
103 self.plot_settings['cmap'] = 'stereocor{det!s}'.format(det=self.detector[-1])
104 self.plot_settings['norm'] = ImageNormalize(
105 stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)
106
107 # Try to identify when the FITS meta data does not have the correct
108 # date FITS keyword
109 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
110 self.meta['date-obs'] = self.meta['date_obs']
111
112 @property
113 def measurement(self):
114 """
115 Returns the type of data observed.
116 """
117 # TODO: This needs to do more than white-light. Should give B, pB, etc.
118 return "white-light"
119
120 @classmethod
121 def is_datasource_for(cls, data, header, **kwargs):
122 """Determines if header corresponds to an COR image"""
123 return str(header.get('detector', '')).startswith('COR')
124
125
126 class HIMap(GenericMap):
127 """STEREO-SECCHI Heliospheric Imager (HI) Map.
128
129 The HI is a wide-angle visible-light imaging system
130 for the detection of coronal mass ejection (CME) events
131 in interplanetary space and, in particular, of events
132 directed towards the Earth.
133
134 The Heliospheric imager consists of two instruments, the HI-1 and HI-2.
135 The HI1 observes from 15-80 solar radii while HI2 observes from 80-215
136 solar radii.
137
138 References
139 ----------
140 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
141 * `STEREO SECCHI <https://secchi.nrl.navy.mil>`_
142 * `HI Instrument Page <http://www.stereo.rl.ac.uk>`_
143 """
144
145 def __init__(self, data, header, **kwargs):
146
147 GenericMap.__init__(self, data, header, **kwargs)
148 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
149 self.plot_settings['cmap'] = 'stereohi{det!s}'.format(det=self.detector[-1])
150 self.plot_settings['norm'] = ImageNormalize(
151 stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)
152
153 # Try to identify when the FITS meta data does not have the correct
154 # date FITS keyword
155 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
156 self.meta['date-obs'] = self.meta['date_obs']
157
158 @property
159 def measurement(self):
160 """
161 Returns the type of data observed.
162 """
163 # TODO: This needs to do more than white-light. Should give B, pB, etc.
164 return "white-light"
165
166 @classmethod
167 def is_datasource_for(cls, data, header, **kwargs):
168 """Determines if header corresponds to an COR image"""
169 return str(header.get('detector', '')).startswith('HI')
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/map/sources/stereo.py b/sunpy/map/sources/stereo.py
--- a/sunpy/map/sources/stereo.py
+++ b/sunpy/map/sources/stereo.py
@@ -8,6 +8,7 @@
from astropy.visualization import PowerStretch
from astropy.visualization.mpl_normalize import ImageNormalize
+from sunpy import log
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
@@ -42,6 +43,10 @@
# date FITS keyword
if ('date_obs' in self.meta) and not('date-obs' in self.meta):
self.meta['date-obs'] = self.meta['date_obs']
+ # fix CROTA to CROTAn
+ if "crota" in self.meta and "crota2" not in self.meta:
+ log.debug("EUVIMap: Changing the CROTA keyword to CROTA2")
+ self.meta["crota2"] = self.meta.pop("crota")
@property
def rsun_arcseconds(self):
| {"golden_diff": "diff --git a/sunpy/map/sources/stereo.py b/sunpy/map/sources/stereo.py\n--- a/sunpy/map/sources/stereo.py\n+++ b/sunpy/map/sources/stereo.py\n@@ -8,6 +8,7 @@\n from astropy.visualization import PowerStretch\n from astropy.visualization.mpl_normalize import ImageNormalize\n \n+from sunpy import log\n from sunpy.map import GenericMap\n from sunpy.map.sources.source_type import source_stretch\n \n@@ -42,6 +43,10 @@\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n+ # fix CROTA to CROTAn\n+ if \"crota\" in self.meta and \"crota2\" not in self.meta:\n+ log.debug(\"EUVIMap: Changing the CROTA keyword to CROTA2\")\n+ self.meta[\"crota2\"] = self.meta.pop(\"crota\")\n \n @property\n def rsun_arcseconds(self):\n", "issue": "Fix CROTA in EUVI map source\ne.g. \r\n```python\r\nfrom sunpy.map import Map\r\n\r\nsmap = Map('https://stereo-ssc.nascom.nasa.gov/pub/beacon/ahead/secchi/img/euvi/20210107/20210107_001615_n7euA.fts')\r\nprint(smap.wcs)\r\n```\r\nraises\r\n```\r\nWARNING: FITSFixedWarning: CROTA = -6.39331135705 \r\nkeyword looks very much like CROTAn but isn't. [astropy.wcs.wcs]\r\n```\r\nIt would be good to fix this in the EUVI map source.\n", "before_files": [{"content": "\"\"\"STEREO Map subclass definitions\"\"\"\n\n__author__ = \"Keith Hughitt\"\n__email__ = \"[email protected]\"\n\n\nimport astropy.units as u\nfrom astropy.visualization import PowerStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['EUVIMap', 'CORMap', 'HIMap']\n\n\nclass EUVIMap(GenericMap):\n \"\"\"STEREO-SECCHI EUVI Image Map\n\n EUVI is an extreme ultraviolet (EUV) imager. Part of the STEREO-SECCHI\n suite it observes the Sun from 1 to 1.7 solar radii. 
It is capable of\n observing at 304 (He II), 171 (Fe IX), 195 (Fe XII), and 284 (Fe XV)\n Angstroms.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `Instrument Page <http://secchi.lmsal.com/EUVI/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'sohoeit{wl:d}'.format(wl=int(self.wavelength.value))\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n self.meta['waveunit'] = 'Angstrom'\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def rsun_arcseconds(self):\n \"\"\"\n Radius of the sun in arcseconds.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n return self.meta.get('rsun', None)\n\n @property\n def rsun_obs(self):\n \"\"\"\n Radius of the sun in arcseconds as a quantity.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n rsun_arcseconds = self.meta.get('rsun', None)\n\n if rsun_arcseconds is None:\n rsun_arcseconds = super().rsun_obs\n\n return u.Quantity(rsun_arcseconds, 'arcsec')\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUVI image\"\"\"\n return header.get('detector') == 'EUVI'\n\n\nclass CORMap(GenericMap):\n \"\"\"STEREO-SECCHI CORonograph Image Map.\n\n Part of the STEREO-SECCHI suite of remote sensing telescopes,\n COR is a set of two coronographs (COR1, COR2) onboard STEREO.\n They are both traditional Lyot coronagraphs.\n\n The COR1 detectors observes from 1.3 to 4 solar radii while the\n COR2 detectors observe a range from 2 to 15 solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `COR1 Instrument Page <https://cor1.gsfc.nasa.gov>`_\n * `COR2 Instrument Page <http://secchi.nrl.navy.mil/index.php?p=cor2>`_\n * `COR1 User Guide <https://cor1.gsfc.nasa.gov/guide/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereocor{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. 
Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('COR')\n\n\nclass HIMap(GenericMap):\n \"\"\"STEREO-SECCHI Heliospheric Imager (HI) Map.\n\n The HI is a wide-angle visible-light imaging system\n for the detection of coronal mass ejection (CME) events\n in interplanetary space and, in particular, of events\n directed towards the Earth.\n\n The Heliospheric imager consists of two instruments, the HI-1 and HI-2.\n The HI1 observes from 15-80 solar radii while HI2 observes from 80-215\n solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <https://secchi.nrl.navy.mil>`_\n * `HI Instrument Page <http://www.stereo.rl.ac.uk>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereohi{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('HI')\n", "path": "sunpy/map/sources/stereo.py"}], "after_files": [{"content": "\"\"\"STEREO Map subclass definitions\"\"\"\n\n__author__ = \"Keith Hughitt\"\n__email__ = \"[email protected]\"\n\n\nimport astropy.units as u\nfrom astropy.visualization import PowerStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy import log\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['EUVIMap', 'CORMap', 'HIMap']\n\n\nclass EUVIMap(GenericMap):\n \"\"\"STEREO-SECCHI EUVI Image Map\n\n EUVI is an extreme ultraviolet (EUV) imager. Part of the STEREO-SECCHI\n suite it observes the Sun from 1 to 1.7 solar radii. 
It is capable of\n observing at 304 (He II), 171 (Fe IX), 195 (Fe XII), and 284 (Fe XV)\n Angstroms.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `Instrument Page <http://secchi.lmsal.com/EUVI/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'sohoeit{wl:d}'.format(wl=int(self.wavelength.value))\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n self.meta['waveunit'] = 'Angstrom'\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n # fix CROTA to CROTAn\n if \"crota\" in self.meta and \"crota2\" not in self.meta:\n log.debug(\"EUVIMap: Changing the CROTA keyword to CROTA2\")\n self.meta[\"crota2\"] = self.meta.pop(\"crota\")\n\n @property\n def rsun_arcseconds(self):\n \"\"\"\n Radius of the sun in arcseconds.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n return self.meta.get('rsun', None)\n\n @property\n def rsun_obs(self):\n \"\"\"\n Radius of the sun in arcseconds as a quantity.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n rsun_arcseconds = self.meta.get('rsun', None)\n\n if rsun_arcseconds is None:\n rsun_arcseconds = super().rsun_obs\n\n return u.Quantity(rsun_arcseconds, 'arcsec')\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUVI image\"\"\"\n return header.get('detector') == 'EUVI'\n\n\nclass CORMap(GenericMap):\n \"\"\"STEREO-SECCHI CORonograph Image Map.\n\n Part of the STEREO-SECCHI suite of remote sensing telescopes,\n COR is a set of two coronographs (COR1, COR2) onboard STEREO.\n They are both traditional Lyot coronagraphs.\n\n The COR1 detectors observes from 1.3 to 4 solar radii while the\n COR2 detectors observe a range from 2 to 15 solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `COR1 Instrument Page <https://cor1.gsfc.nasa.gov>`_\n * `COR2 Instrument Page <http://secchi.nrl.navy.mil/index.php?p=cor2>`_\n * `COR1 User Guide <https://cor1.gsfc.nasa.gov/guide/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereocor{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. 
Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('COR')\n\n\nclass HIMap(GenericMap):\n \"\"\"STEREO-SECCHI Heliospheric Imager (HI) Map.\n\n The HI is a wide-angle visible-light imaging system\n for the detection of coronal mass ejection (CME) events\n in interplanetary space and, in particular, of events\n directed towards the Earth.\n\n The Heliospheric imager consists of two instruments, the HI-1 and HI-2.\n The HI1 observes from 15-80 solar radii while HI2 observes from 80-215\n solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <https://secchi.nrl.navy.mil>`_\n * `HI Instrument Page <http://www.stereo.rl.ac.uk>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereohi{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('HI')\n", "path": "sunpy/map/sources/stereo.py"}]} | 2,463 | 241 |
gh_patches_debug_18021 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django 1.10 support
https://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19
`'1.10'` is less than `'1.7.x'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/__init__.py`
Content:
```
1 from __future__ import unicode_literals
2 import warnings
3
4 from django import get_version as get_django_version
5
6 __title__ = "dj-stripe"
7 __summary__ = "Django + Stripe Made Easy"
8 __uri__ = "https://github.com/kavdev/dj-stripe/"
9
10 __version__ = "0.9.0.dev0"
11
12 __author__ = "Alexander Kavanaugh"
13 __email__ = "[email protected]"
14
15 __license__ = "BSD"
16 __license__ = "License :: OSI Approved :: BSD License"
17 __copyright__ = "Copyright 2016 Alexander Kavanaugh"
18
19 if get_django_version() <= '1.7.x':
20 msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \
21 "supported. Please upgrade to Django 1.8 or higher.\n" \
22 "Reference: https://github.com/kavdev/dj-stripe/issues/275"
23 warnings.warn(msg)
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/__init__.py b/djstripe/__init__.py
--- a/djstripe/__init__.py
+++ b/djstripe/__init__.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
import warnings
-from django import get_version as get_django_version
+from django import VERSION as django_version
__title__ = "dj-stripe"
__summary__ = "Django + Stripe Made Easy"
@@ -16,7 +16,7 @@
__license__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2016 Alexander Kavanaugh"
-if get_django_version() <= '1.7.x':
+if django_version < (1, 8):
msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \
"supported. Please upgrade to Django 1.8 or higher.\n" \
"Reference: https://github.com/kavdev/dj-stripe/issues/275"
| {"golden_diff": "diff --git a/djstripe/__init__.py b/djstripe/__init__.py\n--- a/djstripe/__init__.py\n+++ b/djstripe/__init__.py\n@@ -1,7 +1,7 @@\n from __future__ import unicode_literals\n import warnings\n \n-from django import get_version as get_django_version\n+from django import VERSION as django_version\n \n __title__ = \"dj-stripe\"\n __summary__ = \"Django + Stripe Made Easy\"\n@@ -16,7 +16,7 @@\n __license__ = \"License :: OSI Approved :: BSD License\"\n __copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n \n-if get_django_version() <= '1.7.x':\n+if django_version < (1, 8):\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n", "issue": "Django 1.10 support\nhttps://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19\n\n`'1.10'` is less than `'1.7.x'`\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport warnings\n\nfrom django import get_version as get_django_version\n\n__title__ = \"dj-stripe\"\n__summary__ = \"Django + Stripe Made Easy\"\n__uri__ = \"https://github.com/kavdev/dj-stripe/\"\n\n__version__ = \"0.9.0.dev0\"\n\n__author__ = \"Alexander Kavanaugh\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD\"\n__license__ = \"License :: OSI Approved :: BSD License\"\n__copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n\nif get_django_version() <= '1.7.x':\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n warnings.warn(msg)\n", "path": "djstripe/__init__.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nimport warnings\n\nfrom django import VERSION as django_version\n\n__title__ = \"dj-stripe\"\n__summary__ = \"Django + Stripe Made Easy\"\n__uri__ = \"https://github.com/kavdev/dj-stripe/\"\n\n__version__ = \"0.9.0.dev0\"\n\n__author__ = \"Alexander Kavanaugh\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD\"\n__license__ = \"License :: OSI Approved :: BSD License\"\n__copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n\nif django_version < (1, 8):\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n warnings.warn(msg)\n", "path": "djstripe/__init__.py"}]} | 566 | 229 |
gh_patches_debug_4821 | rasdani/github-patches | git_diff | numpy__numpy-13306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indicate attempted allocation size when constructing an array gives a MemoryError
I've seen multiple questions now of the form "why does this give a MemoryError" when the reason is the user ended up allocating a bigger array than expected.
It would be helpful to include the shape and dtype in the error message.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/core/_exceptions.py`
Content:
```
1 """
2 Various richly-typed exceptions, that also help us deal with string formatting
3 in python where it's easier.
4
5 By putting the formatting in `__str__`, we also avoid paying the cost for
6 users who silence the exceptions.
7 """
8 from numpy.core.overrides import set_module
9
10 def _unpack_tuple(tup):
11 if len(tup) == 1:
12 return tup[0]
13 else:
14 return tup
15
16
17 def _display_as_base(cls):
18 """
19 A decorator that makes an exception class look like its base.
20
21 We use this to hide subclasses that are implementation details - the user
22 should catch the base type, which is what the traceback will show them.
23
24 Classes decorated with this decorator are subject to removal without a
25 deprecation warning.
26 """
27 assert issubclass(cls, Exception)
28 cls.__name__ = cls.__base__.__name__
29 cls.__qualname__ = cls.__base__.__qualname__
30 return cls
31
32
33 class UFuncTypeError(TypeError):
34 """ Base class for all ufunc exceptions """
35 def __init__(self, ufunc):
36 self.ufunc = ufunc
37
38
39 @_display_as_base
40 class _UFuncNoLoopError(UFuncTypeError):
41 """ Thrown when a ufunc loop cannot be found """
42 def __init__(self, ufunc, dtypes):
43 super().__init__(ufunc)
44 self.dtypes = tuple(dtypes)
45
46 def __str__(self):
47 return (
48 "ufunc {!r} did not contain a loop with signature matching types "
49 "{!r} -> {!r}"
50 ).format(
51 self.ufunc.__name__,
52 _unpack_tuple(self.dtypes[:self.ufunc.nin]),
53 _unpack_tuple(self.dtypes[self.ufunc.nin:])
54 )
55
56
57 @_display_as_base
58 class _UFuncCastingError(UFuncTypeError):
59 def __init__(self, ufunc, casting, from_, to):
60 super().__init__(ufunc)
61 self.casting = casting
62 self.from_ = from_
63 self.to = to
64
65
66 @_display_as_base
67 class _UFuncInputCastingError(_UFuncCastingError):
68 """ Thrown when a ufunc input cannot be casted """
69 def __init__(self, ufunc, casting, from_, to, i):
70 super().__init__(ufunc, casting, from_, to)
71 self.in_i = i
72
73 def __str__(self):
74 # only show the number if more than one input exists
75 i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
76 return (
77 "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
78 "rule {!r}"
79 ).format(
80 self.ufunc.__name__, i_str, self.from_, self.to, self.casting
81 )
82
83
84 @_display_as_base
85 class _UFuncOutputCastingError(_UFuncCastingError):
86 """ Thrown when a ufunc output cannot be casted """
87 def __init__(self, ufunc, casting, from_, to, i):
88 super().__init__(ufunc, casting, from_, to)
89 self.out_i = i
90
91 def __str__(self):
92 # only show the number if more than one output exists
93 i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
94 return (
95 "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
96 "rule {!r}"
97 ).format(
98 self.ufunc.__name__, i_str, self.from_, self.to, self.casting
99 )
100
101
102 # Exception used in shares_memory()
103 @set_module('numpy')
104 class TooHardError(RuntimeError):
105 pass
106
107
108 @set_module('numpy')
109 class AxisError(ValueError, IndexError):
110 """ Axis supplied was invalid. """
111 def __init__(self, axis, ndim=None, msg_prefix=None):
112 # single-argument form just delegates to base class
113 if ndim is None and msg_prefix is None:
114 msg = axis
115
116 # do the string formatting here, to save work in the C code
117 else:
118 msg = ("axis {} is out of bounds for array of dimension {}"
119 .format(axis, ndim))
120 if msg_prefix is not None:
121 msg = "{}: {}".format(msg_prefix, msg)
122
123 super(AxisError, self).__init__(msg)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py
--- a/numpy/core/_exceptions.py
+++ b/numpy/core/_exceptions.py
@@ -121,3 +121,15 @@
msg = "{}: {}".format(msg_prefix, msg)
super(AxisError, self).__init__(msg)
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+ """ Thrown when an array cannot be allocated"""
+ def __init__(self, shape, dtype):
+ self.shape = shape
+ self.dtype = dtype
+
+ def __str__(self):
+ return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype)
+
| {"golden_diff": "diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py\n--- a/numpy/core/_exceptions.py\n+++ b/numpy/core/_exceptions.py\n@@ -121,3 +121,15 @@\n msg = \"{}: {}\".format(msg_prefix, msg)\n \n super(AxisError, self).__init__(msg)\n+\n+\n+@_display_as_base\n+class _ArrayMemoryError(MemoryError):\n+ \"\"\" Thrown when an array cannot be allocated\"\"\"\n+ def __init__(self, shape, dtype):\n+ self.shape = shape\n+ self.dtype = dtype\n+\n+ def __str__(self):\n+ return \"Unable to allocate array with shape {} and data type {}\".format(self.shape, self.dtype)\n+\n", "issue": "Indicate attempted allocation size when constructing an array gives a MemoryError\nI've seen multiple questions now of the form \"why does this give a MemoryError\" when the reason is the user ended up allocating a bigger array than expected.\r\n\r\nIt would be helpful to include the shape and dtype in the error message.\n", "before_files": [{"content": "\"\"\"\nVarious richly-typed exceptions, that also help us deal with string formatting\nin python where it's easier.\n\nBy putting the formatting in `__str__`, we also avoid paying the cost for\nusers who silence the exceptions.\n\"\"\"\nfrom numpy.core.overrides import set_module\n\ndef _unpack_tuple(tup):\n if len(tup) == 1:\n return tup[0]\n else:\n return tup\n\n\ndef _display_as_base(cls):\n \"\"\"\n A decorator that makes an exception class look like its base.\n\n We use this to hide subclasses that are implementation details - the user\n should catch the base type, which is what the traceback will show them.\n\n Classes decorated with this decorator are subject to removal without a\n deprecation warning.\n \"\"\"\n assert issubclass(cls, Exception)\n cls.__name__ = cls.__base__.__name__\n cls.__qualname__ = cls.__base__.__qualname__\n return cls\n\n\nclass UFuncTypeError(TypeError):\n \"\"\" Base class for all ufunc exceptions \"\"\"\n def __init__(self, ufunc):\n self.ufunc = ufunc\n\n\n@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n \"\"\" Thrown when a ufunc loop cannot be found \"\"\"\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc)\n self.dtypes = tuple(dtypes)\n\n def __str__(self):\n return (\n \"ufunc {!r} did not contain a loop with signature matching types \"\n \"{!r} -> {!r}\"\n ).format(\n self.ufunc.__name__,\n _unpack_tuple(self.dtypes[:self.ufunc.nin]),\n _unpack_tuple(self.dtypes[self.ufunc.nin:])\n )\n\n\n@_display_as_base\nclass _UFuncCastingError(UFuncTypeError):\n def __init__(self, ufunc, casting, from_, to):\n super().__init__(ufunc)\n self.casting = casting\n self.from_ = from_\n self.to = to\n\n\n@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc input cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.in_i = i\n\n def __str__(self):\n # only show the number if more than one input exists\n i_str = \"{} \".format(self.in_i) if self.ufunc.nin != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc output cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n # only show the number if more than one output exists\n i_str = 
\"{} \".format(self.out_i) if self.ufunc.nout != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n# Exception used in shares_memory()\n@set_module('numpy')\nclass TooHardError(RuntimeError):\n pass\n\n\n@set_module('numpy')\nclass AxisError(ValueError, IndexError):\n \"\"\" Axis supplied was invalid. \"\"\"\n def __init__(self, axis, ndim=None, msg_prefix=None):\n # single-argument form just delegates to base class\n if ndim is None and msg_prefix is None:\n msg = axis\n\n # do the string formatting here, to save work in the C code\n else:\n msg = (\"axis {} is out of bounds for array of dimension {}\"\n .format(axis, ndim))\n if msg_prefix is not None:\n msg = \"{}: {}\".format(msg_prefix, msg)\n\n super(AxisError, self).__init__(msg)\n", "path": "numpy/core/_exceptions.py"}], "after_files": [{"content": "\"\"\"\nVarious richly-typed exceptions, that also help us deal with string formatting\nin python where it's easier.\n\nBy putting the formatting in `__str__`, we also avoid paying the cost for\nusers who silence the exceptions.\n\"\"\"\nfrom numpy.core.overrides import set_module\n\ndef _unpack_tuple(tup):\n if len(tup) == 1:\n return tup[0]\n else:\n return tup\n\n\ndef _display_as_base(cls):\n \"\"\"\n A decorator that makes an exception class look like its base.\n\n We use this to hide subclasses that are implementation details - the user\n should catch the base type, which is what the traceback will show them.\n\n Classes decorated with this decorator are subject to removal without a\n deprecation warning.\n \"\"\"\n assert issubclass(cls, Exception)\n cls.__name__ = cls.__base__.__name__\n cls.__qualname__ = cls.__base__.__qualname__\n return cls\n\n\nclass UFuncTypeError(TypeError):\n \"\"\" Base class for all ufunc exceptions \"\"\"\n def __init__(self, ufunc):\n self.ufunc = ufunc\n\n\n@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n \"\"\" Thrown when a ufunc loop cannot be found \"\"\"\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc)\n self.dtypes = tuple(dtypes)\n\n def __str__(self):\n return (\n \"ufunc {!r} did not contain a loop with signature matching types \"\n \"{!r} -> {!r}\"\n ).format(\n self.ufunc.__name__,\n _unpack_tuple(self.dtypes[:self.ufunc.nin]),\n _unpack_tuple(self.dtypes[self.ufunc.nin:])\n )\n\n\n@_display_as_base\nclass _UFuncCastingError(UFuncTypeError):\n def __init__(self, ufunc, casting, from_, to):\n super().__init__(ufunc)\n self.casting = casting\n self.from_ = from_\n self.to = to\n\n\n@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc input cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.in_i = i\n\n def __str__(self):\n # only show the number if more than one input exists\n i_str = \"{} \".format(self.in_i) if self.ufunc.nin != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc output cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n # only show the number if more than one output exists\n i_str = 
\"{} \".format(self.out_i) if self.ufunc.nout != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n# Exception used in shares_memory()\n@set_module('numpy')\nclass TooHardError(RuntimeError):\n pass\n\n\n@set_module('numpy')\nclass AxisError(ValueError, IndexError):\n \"\"\" Axis supplied was invalid. \"\"\"\n def __init__(self, axis, ndim=None, msg_prefix=None):\n # single-argument form just delegates to base class\n if ndim is None and msg_prefix is None:\n msg = axis\n\n # do the string formatting here, to save work in the C code\n else:\n msg = (\"axis {} is out of bounds for array of dimension {}\"\n .format(axis, ndim))\n if msg_prefix is not None:\n msg = \"{}: {}\".format(msg_prefix, msg)\n\n super(AxisError, self).__init__(msg)\n\n\n@_display_as_base\nclass _ArrayMemoryError(MemoryError):\n \"\"\" Thrown when an array cannot be allocated\"\"\"\n def __init__(self, shape, dtype):\n self.shape = shape\n self.dtype = dtype\n\n def __str__(self):\n return \"Unable to allocate array with shape {} and data type {}\".format(self.shape, self.dtype)\n\n", "path": "numpy/core/_exceptions.py"}]} | 1,547 | 165 |
gh_patches_debug_30870 | rasdani/github-patches | git_diff | ManimCommunity__manim-1368 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version checker throws an error when manim is run without internet
## Description of bug / unexpected behavior
Title.
## Expected behavior
Version checker should just log that the version can't be checked, and not produce a traceback.
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
Paste your code here.
```
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
<details><summary>Images/GIFs</summary>
<!-- PASTE MEDIA HERE -->
</details>
## Logs
<details><summary>Terminal output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
C:\Users\Daryl\manim-dev\manim>manim -p test.py
Manim Community v0.4.0
1: Become
2: BecomePoints
3: BecomePointsScene
4: BecomeStyle
5: BecomeStyle2
6: Image
Choose number corresponding to desired scene/arguments.
(Use comma separated list for multiple entries)
Choice(s): 6
[04/19/21 10:55:41] INFO scene_file_writer.py:585
File ready at C:\Users\Daryl\manim-dev\manim\media\images\test\Ima
ge_ManimCE_v0.4.0.png
INFO Rendered Image scene.py:199
Played 0 animations
INFO Previewed File at: file_ops.py:98
C:\Users\Daryl\manim-dev\manim\media\images\test\Image_ManimCE_v0.4.0.png
Traceback (most recent call last):
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\Scripts\manim", line 5, in <module>
main()
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "C:\Users\Daryl\manim-dev\manim\manim\cli\render\commands.py", line 166, in render
stable = req_info.json()["info"]["version"]
AttributeError: 'dict' object has no attribute 'json'
```
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
## System specifications
<details><summary>System Details</summary>
- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):
- RAM:
- Python version (`python/py/python3 --version`):
- Installed modules (provide output from `pip list`):
```
PASTE HERE
```
</details>
<details><summary>LaTeX details</summary>
+ LaTeX distribution (e.g. TeX Live 2020):
+ Installed LaTeX packages:
<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->
</details>
<details><summary>FFMPEG</summary>
Output of `ffmpeg -version`:
```
PASTE HERE
```
</details>
## Additional comments
<!-- Add further context that you think might be relevant for this issue here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/cli/render/commands.py`
Content:
```
1 """Manim's default subcommand, render.
2
3 Manim's render subcommand is accessed in the command-line interface via
4 ``manim``, but can be more explicitly accessed with ``manim render``. Here you
5 can specify options, and arguments for the render command.
6
7 """
8 import json
9 import sys
10 from pathlib import Path
11 from textwrap import dedent
12
13 import click
14 import cloup
15 import requests
16
17 from ... import __version__, config, console, logger
18 from ...constants import CONTEXT_SETTINGS, EPILOG
19 from ...utils.exceptions import RerunSceneException
20 from ...utils.module_ops import scene_classes_from_file
21 from .ease_of_access_options import ease_of_access_options
22 from .global_options import global_options
23 from .output_options import output_options
24 from .render_options import render_options
25
26
27 @cloup.command(
28 context_settings=CONTEXT_SETTINGS,
29 epilog=EPILOG,
30 )
31 @click.argument("file", type=Path, required=True)
32 @click.argument("scene_names", required=False, nargs=-1)
33 @global_options
34 @output_options
35 @render_options
36 @ease_of_access_options
37 @click.pass_context
38 def render(
39 ctx,
40 **args,
41 ):
42 """Render SCENE(S) from the input FILE.
43
44 FILE is the file path of the script.
45
46 SCENES is an optional list of scenes in the file.
47 """
48 for scene in args["scene_names"]:
49 if str(scene).startswith("-"):
50 logger.warning(
51 dedent(
52 """\
53 Manim Community has moved to Click for the CLI.
54
55 This means that options in the CLI are provided BEFORE the positional
56 arguments for your FILE and SCENE(s):
57 `manim render [OPTIONS] [FILE] [SCENES]...`
58
59 For example:
60 New way - `manim -p -ql file.py SceneName1 SceneName2 ...`
61 Old way - `manim file.py SceneName1 SceneName2 ... -p -ql`
62
63 To see the help page for the new available options, run:
64 `manim render -h`
65 """
66 )
67 )
68 sys.exit()
69
70 if args["use_opengl_renderer"]:
71 logger.warning(
72 "--use_opengl_renderer is deprecated, please use --renderer=opengl instead!"
73 )
74 renderer = "opengl"
75
76 if args["use_webgl_renderer"]:
77 logger.warning(
78 "--use_webgl_renderer is deprecated, please use --renderer=webgl instead!"
79 )
80 renderer = "webgl"
81
82 if args["use_webgl_renderer"] and args["use_opengl_renderer"]:
83 logger.warning("You may select only one renderer!")
84 sys.exit()
85
86 class ClickArgs:
87 def __init__(self, args):
88 for name in args:
89 setattr(self, name, args[name])
90
91 def _get_kwargs(self):
92 return list(self.__dict__.items())
93
94 def __eq__(self, other):
95 if not isinstance(other, ClickArgs):
96 return NotImplemented
97 return vars(self) == vars(other)
98
99 def __contains__(self, key):
100 return key in self.__dict__
101
102 def __repr__(self):
103 return str(self.__dict__)
104
105 click_args = ClickArgs(args)
106 if args["jupyter"]:
107 return click_args
108
109 config.digest_args(click_args)
110 file = args["file"]
111 if config.renderer == "opengl":
112 from manim.renderer.opengl_renderer import OpenGLRenderer
113
114 for SceneClass in scene_classes_from_file(file):
115 try:
116 renderer = OpenGLRenderer()
117 while True:
118 scene_classes = scene_classes_from_file(file)
119 SceneClass = scene_classes[0]
120 scene = SceneClass(renderer)
121 status = scene.render()
122 if status:
123 continue
124 else:
125 break
126 except Exception:
127 console.print_exception()
128 elif config.renderer == "webgl":
129 try:
130 from manim.grpc.impl import frame_server_impl
131
132 server = frame_server_impl.get(file)
133 server.start()
134 server.wait_for_termination()
135 except ModuleNotFoundError:
136 console.print(
137 "Dependencies for the WebGL render are missing. Run "
138 "pip install manim[webgl_renderer] to install them."
139 )
140 console.print_exception()
141 else:
142 for SceneClass in scene_classes_from_file(file):
143 try:
144 scene = SceneClass()
145 scene.render()
146 except Exception:
147 console.print_exception()
148
149 if config.notify_outdated_version:
150 manim_info_url = "https://pypi.org/pypi/manim/json"
151 warn_prompt = "Cannot check if latest release of manim is installed"
152 req_info = {}
153
154 try:
155 req_info = requests.get(manim_info_url)
156 req_info.raise_for_status()
157 except requests.exceptions.HTTPError:
158 logger.debug(f"HTTP Error: {warn_prompt}")
159 except requests.exceptions.ConnectionError:
160 logger.debug(f"Connection Error: {warn_prompt}")
161 except requests.exceptions.Timeout:
162 logger.debug(f"Timed Out: {warn_prompt}")
163 except Exception:
164 logger.debug(f"Something went wrong: {warn_prompt}")
165
166 try:
167 stable = req_info.json()["info"]["version"]
168
169 if stable != __version__:
170 console.print(
171 f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available."
172 )
173 console.print(
174 "You should consider upgrading via [yellow]pip install -U manim[/yellow]"
175 )
176 except json.JSONDecodeError:
177 logger.debug(warn_prompt)
178 logger.debug(f"Error decoding JSON from {manim_info_url}")
179
180 return args
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py
--- a/manim/cli/render/commands.py
+++ b/manim/cli/render/commands.py
@@ -154,18 +154,8 @@
try:
req_info = requests.get(manim_info_url)
req_info.raise_for_status()
- except requests.exceptions.HTTPError:
- logger.debug(f"HTTP Error: {warn_prompt}")
- except requests.exceptions.ConnectionError:
- logger.debug(f"Connection Error: {warn_prompt}")
- except requests.exceptions.Timeout:
- logger.debug(f"Timed Out: {warn_prompt}")
- except Exception:
- logger.debug(f"Something went wrong: {warn_prompt}")
- try:
stable = req_info.json()["info"]["version"]
-
if stable != __version__:
console.print(
f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available."
@@ -173,8 +163,16 @@
console.print(
"You should consider upgrading via [yellow]pip install -U manim[/yellow]"
)
+ except requests.exceptions.HTTPError:
+ logger.debug(f"HTTP Error: {warn_prompt}")
+ except requests.exceptions.ConnectionError:
+ logger.debug(f"Connection Error: {warn_prompt}")
+ except requests.exceptions.Timeout:
+ logger.debug(f"Timed Out: {warn_prompt}")
except json.JSONDecodeError:
logger.debug(warn_prompt)
logger.debug(f"Error decoding JSON from {manim_info_url}")
+ except Exception:
+ logger.debug(f"Something went wrong: {warn_prompt}")
return args
| {"golden_diff": "diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py\n--- a/manim/cli/render/commands.py\n+++ b/manim/cli/render/commands.py\n@@ -154,18 +154,8 @@\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n- except requests.exceptions.HTTPError:\n- logger.debug(f\"HTTP Error: {warn_prompt}\")\n- except requests.exceptions.ConnectionError:\n- logger.debug(f\"Connection Error: {warn_prompt}\")\n- except requests.exceptions.Timeout:\n- logger.debug(f\"Timed Out: {warn_prompt}\")\n- except Exception:\n- logger.debug(f\"Something went wrong: {warn_prompt}\")\n \n- try:\n stable = req_info.json()[\"info\"][\"version\"]\n-\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\"\n@@ -173,8 +163,16 @@\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\"\n )\n+ except requests.exceptions.HTTPError:\n+ logger.debug(f\"HTTP Error: {warn_prompt}\")\n+ except requests.exceptions.ConnectionError:\n+ logger.debug(f\"Connection Error: {warn_prompt}\")\n+ except requests.exceptions.Timeout:\n+ logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n+ except Exception:\n+ logger.debug(f\"Something went wrong: {warn_prompt}\")\n \n return args\n", "issue": "Version checker throws an error when manim is run without internet\n## Description of bug / unexpected behavior\r\nTitle.\r\n\r\n\r\n## Expected behavior\r\nVersion checker should just log that the version can't be checked, and not produce a traceback.\r\n\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nPaste your code here.\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. 
-->\r\n\r\n<details><summary>Images/GIFs</summary>\r\n\r\n<!-- PASTE MEDIA HERE -->\r\n\r\n</details>\r\n\r\n\r\n## Logs\r\n<details><summary>Terminal output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\nC:\\Users\\Daryl\\manim-dev\\manim>manim -p test.py\r\nManim Community v0.4.0\r\n\r\n1: Become\r\n2: BecomePoints\r\n3: BecomePointsScene\r\n4: BecomeStyle\r\n5: BecomeStyle2\r\n6: Image\r\n\r\nChoose number corresponding to desired scene/arguments.\r\n(Use comma separated list for multiple entries)\r\nChoice(s): 6\r\n[04/19/21 10:55:41] INFO scene_file_writer.py:585\r\n File ready at C:\\Users\\Daryl\\manim-dev\\manim\\media\\images\\test\\Ima\r\n ge_ManimCE_v0.4.0.png\r\n\r\n INFO Rendered Image scene.py:199\r\n Played 0 animations\r\n INFO Previewed File at: file_ops.py:98\r\n C:\\Users\\Daryl\\manim-dev\\manim\\media\\images\\test\\Image_ManimCE_v0.4.0.png\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\Scripts\\manim\", line 5, in <module>\r\n main()\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\decorators.py\", line 21, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"C:\\Users\\Daryl\\manim-dev\\manim\\manim\\cli\\render\\commands.py\", line 166, in render\r\n stable = req_info.json()[\"info\"][\"version\"]\r\nAttributeError: 'dict' object has no attribute 'json'\r\n```\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):\r\n- RAM:\r\n- Python version (`python/py/python3 --version`):\r\n- Installed modules (provide output from `pip list`):\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n<details><summary>LaTeX details</summary>\r\n\r\n+ LaTeX distribution (e.g. TeX Live 2020):\r\n+ Installed LaTeX packages:\r\n<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->\r\n</details>\r\n\r\n<details><summary>FFMPEG</summary>\r\n\r\nOutput of `ffmpeg -version`:\r\n\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant for this issue here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nimport json\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, logger\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.exceptions import RerunSceneException\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options\n@ease_of_access_options\[email protected]_context\ndef render(\n ctx,\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n for scene in args[\"scene_names\"]:\n if str(scene).startswith(\"-\"):\n logger.warning(\n dedent(\n \"\"\"\\\n Manim Community has moved to Click for the CLI.\n\n This means that options in the CLI are provided BEFORE the positional\n arguments for your FILE and SCENE(s):\n `manim render [OPTIONS] [FILE] [SCENES]...`\n\n For example:\n New way - `manim -p -ql file.py SceneName1 SceneName2 ...`\n Old way - `manim file.py SceneName1 SceneName2 ... -p -ql`\n\n To see the help page for the new available options, run:\n `manim render -h`\n \"\"\"\n )\n )\n sys.exit()\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\"\n )\n renderer = \"opengl\"\n\n if args[\"use_webgl_renderer\"]:\n logger.warning(\n \"--use_webgl_renderer is deprecated, please use --renderer=webgl instead!\"\n )\n renderer = \"webgl\"\n\n if args[\"use_webgl_renderer\"] and args[\"use_opengl_renderer\"]:\n logger.warning(\"You may select only one renderer!\")\n sys.exit()\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n config.digest_args(click_args)\n file = args[\"file\"]\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n for SceneClass in scene_classes_from_file(file):\n try:\n renderer = OpenGLRenderer()\n while True:\n scene_classes = scene_classes_from_file(file)\n SceneClass = scene_classes[0]\n scene = SceneClass(renderer)\n status = scene.render()\n if status:\n continue\n else:\n break\n except Exception:\n console.print_exception()\n elif config.renderer == \"webgl\":\n try:\n from manim.grpc.impl import frame_server_impl\n\n server = frame_server_impl.get(file)\n server.start()\n server.wait_for_termination()\n except ModuleNotFoundError:\n 
console.print(\n \"Dependencies for the WebGL render are missing. Run \"\n \"pip install manim[webgl_renderer] to install them.\"\n )\n console.print_exception()\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n scene = SceneClass()\n scene.render()\n except Exception:\n console.print_exception()\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n try:\n stable = req_info.json()[\"info\"][\"version\"]\n\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\"\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\"\n )\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}], "after_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nimport json\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, logger\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.exceptions import RerunSceneException\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options\n@ease_of_access_options\[email protected]_context\ndef render(\n ctx,\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n for scene in args[\"scene_names\"]:\n if str(scene).startswith(\"-\"):\n logger.warning(\n dedent(\n \"\"\"\\\n Manim Community has moved to Click for the CLI.\n\n This means that options in the CLI are provided BEFORE the positional\n arguments for your FILE and SCENE(s):\n `manim render [OPTIONS] [FILE] [SCENES]...`\n\n For example:\n New way - `manim -p -ql file.py SceneName1 SceneName2 ...`\n Old way - `manim file.py SceneName1 SceneName2 ... 
-p -ql`\n\n To see the help page for the new available options, run:\n `manim render -h`\n \"\"\"\n )\n )\n sys.exit()\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\"\n )\n renderer = \"opengl\"\n\n if args[\"use_webgl_renderer\"]:\n logger.warning(\n \"--use_webgl_renderer is deprecated, please use --renderer=webgl instead!\"\n )\n renderer = \"webgl\"\n\n if args[\"use_webgl_renderer\"] and args[\"use_opengl_renderer\"]:\n logger.warning(\"You may select only one renderer!\")\n sys.exit()\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n config.digest_args(click_args)\n file = args[\"file\"]\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n for SceneClass in scene_classes_from_file(file):\n try:\n renderer = OpenGLRenderer()\n while True:\n scene_classes = scene_classes_from_file(file)\n SceneClass = scene_classes[0]\n scene = SceneClass(renderer)\n status = scene.render()\n if status:\n continue\n else:\n break\n except Exception:\n console.print_exception()\n elif config.renderer == \"webgl\":\n try:\n from manim.grpc.impl import frame_server_impl\n\n server = frame_server_impl.get(file)\n server.start()\n server.wait_for_termination()\n except ModuleNotFoundError:\n console.print(\n \"Dependencies for the WebGL render are missing. Run \"\n \"pip install manim[webgl_renderer] to install them.\"\n )\n console.print_exception()\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n scene = SceneClass()\n scene.render()\n except Exception:\n console.print_exception()\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n\n stable = req_info.json()[\"info\"][\"version\"]\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\"\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\"\n )\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}]} | 3,014 | 378 |
gh_patches_debug_12594 | rasdani/github-patches | git_diff | optuna__optuna-1600 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Kubernetes example of "simple" to avoid potential errors
https://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54
Like https://github.com/optuna/optuna/pull/1536, we should create a study before running the script.
--- END ISSUE ---
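For illustration, a minimal sketch of the split the issue asks for, assuming the same study name and a placeholder storage URL (the real example builds the URL from environment variables): the study is created once up front, and the worker script only loads it.

```python
import optuna

STORAGE = "postgresql://user:password@postgres:5432/db"  # placeholder credentials

# One-time setup step, run once before any worker starts:
optuna.create_study(
    direction="maximize",
    study_name="kubernetes",
    storage=STORAGE,
    load_if_exists=True,
)

# Worker script: load the pre-created study and optimize.
study = optuna.load_study(study_name="kubernetes", storage=STORAGE)
study.optimize(objective, n_trials=20)  # objective as defined in the example file
```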
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/kubernetes/simple/sklearn_distributed.py`
Content:
```
1 """
2 Optuna distributed optimization example that optimizes
3 an sklearn classifier for the Iris dataset on Kubernetes.
4
5 This example's code is mostly the same as the sklearn_simple.py example, except for two things:
6
7 1 - It gives a name to the study and sets load_if_exists to True
8 in order to avoid errors when the code is run from multiple workers.
9
10 2 - It sets the storage address to the postgres pod deployed with the workers.
11
12 """
13 import os
14
15 import sklearn.datasets
16 import sklearn.ensemble
17 import sklearn.model_selection
18 import sklearn.svm
19
20 import optuna
21
22
23 # FYI: Objective functions can take additional arguments
24 # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
25 def objective(trial):
26 iris = sklearn.datasets.load_iris()
27 x, y = iris.data, iris.target
28
29 classifier_name = trial.suggest_categorical("classifier", ["SVC", "RandomForest"])
30 if classifier_name == "SVC":
31 svc_c = trial.suggest_float("svc_c", 1e-10, 1e10, log=True)
32 classifier_obj = sklearn.svm.SVC(C=svc_c, gamma="auto")
33 else:
34 rf_max_depth = trial.suggest_int("rf_max_depth", 2, 32, log=True)
35 classifier_obj = sklearn.ensemble.RandomForestClassifier(
36 max_depth=rf_max_depth, n_estimators=10
37 )
38
39 score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)
40 accuracy = score.mean()
41 return accuracy
42
43
44 if __name__ == "__main__":
45 study = optuna.create_study(
46 direction="maximize",
47 study_name="kubernetes",
48 storage="postgresql://{}:{}@postgres:5432/{}".format(
49 os.environ["POSTGRES_USER"],
50 os.environ["POSTGRES_PASSWORD"],
51 os.environ["POSTGRES_DB"],
52 ),
53 load_if_exists=True,
54 )
55 study.optimize(objective, n_trials=20)
56 print(study.best_trial)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py
--- a/examples/kubernetes/simple/sklearn_distributed.py
+++ b/examples/kubernetes/simple/sklearn_distributed.py
@@ -42,15 +42,13 @@
if __name__ == "__main__":
- study = optuna.create_study(
- direction="maximize",
+ study = optuna.load_study(
study_name="kubernetes",
storage="postgresql://{}:{}@postgres:5432/{}".format(
os.environ["POSTGRES_USER"],
os.environ["POSTGRES_PASSWORD"],
os.environ["POSTGRES_DB"],
),
- load_if_exists=True,
)
study.optimize(objective, n_trials=20)
print(study.best_trial)
| {"golden_diff": "diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py\n--- a/examples/kubernetes/simple/sklearn_distributed.py\n+++ b/examples/kubernetes/simple/sklearn_distributed.py\n@@ -42,15 +42,13 @@\n \n \n if __name__ == \"__main__\":\n- study = optuna.create_study(\n- direction=\"maximize\",\n+ study = optuna.load_study(\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n- load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "issue": "Update Kubernetes example of \"simple\" to avoid potential errors\nhttps://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54\r\n\r\nLike https://github.com/optuna/optuna/pull/1536, we should create a study before running the script.\n", "before_files": [{"content": "\"\"\"\nOptuna distributed optimization example that optimizes\nan sklearn classifier for the Iris dataset on Kubernetes.\n\nThis example's code is mostly the same as the sklearn_simple.py example, except for two things:\n\n1 - It gives a name to the study and sets load_if_exists to True\nin order to avoid errors when the code is run from multiple workers.\n\n2 - It sets the storage address to the postgres pod deployed with the workers.\n\n\"\"\"\nimport os\n\nimport sklearn.datasets\nimport sklearn.ensemble\nimport sklearn.model_selection\nimport sklearn.svm\n\nimport optuna\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n iris = sklearn.datasets.load_iris()\n x, y = iris.data, iris.target\n\n classifier_name = trial.suggest_categorical(\"classifier\", [\"SVC\", \"RandomForest\"])\n if classifier_name == \"SVC\":\n svc_c = trial.suggest_float(\"svc_c\", 1e-10, 1e10, log=True)\n classifier_obj = sklearn.svm.SVC(C=svc_c, gamma=\"auto\")\n else:\n rf_max_depth = trial.suggest_int(\"rf_max_depth\", 2, 32, log=True)\n classifier_obj = sklearn.ensemble.RandomForestClassifier(\n max_depth=rf_max_depth, n_estimators=10\n )\n\n score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)\n accuracy = score.mean()\n return accuracy\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(\n direction=\"maximize\",\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "path": "examples/kubernetes/simple/sklearn_distributed.py"}], "after_files": [{"content": "\"\"\"\nOptuna distributed optimization example that optimizes\nan sklearn classifier for the Iris dataset on Kubernetes.\n\nThis example's code is mostly the same as the sklearn_simple.py example, except for two things:\n\n1 - It gives a name to the study and sets load_if_exists to True\nin order to avoid errors when the code is run from multiple workers.\n\n2 - It sets the storage address to the postgres pod deployed with the workers.\n\n\"\"\"\nimport os\n\nimport sklearn.datasets\nimport sklearn.ensemble\nimport sklearn.model_selection\nimport sklearn.svm\n\nimport optuna\n\n\n# FYI: Objective functions can take additional 
arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n iris = sklearn.datasets.load_iris()\n x, y = iris.data, iris.target\n\n classifier_name = trial.suggest_categorical(\"classifier\", [\"SVC\", \"RandomForest\"])\n if classifier_name == \"SVC\":\n svc_c = trial.suggest_float(\"svc_c\", 1e-10, 1e10, log=True)\n classifier_obj = sklearn.svm.SVC(C=svc_c, gamma=\"auto\")\n else:\n rf_max_depth = trial.suggest_int(\"rf_max_depth\", 2, 32, log=True)\n classifier_obj = sklearn.ensemble.RandomForestClassifier(\n max_depth=rf_max_depth, n_estimators=10\n )\n\n score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)\n accuracy = score.mean()\n return accuracy\n\n\nif __name__ == \"__main__\":\n study = optuna.load_study(\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "path": "examples/kubernetes/simple/sklearn_distributed.py"}]} | 922 | 175 |
gh_patches_debug_24487 | rasdani/github-patches | git_diff | chainer__chainer-3032 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
inconsistency to use example module
I found some inconsistent behavior when importing classes.
To use SOME_CLASS in chainer/iterators, the following code works.
```importIterator.py
import chainer
some_class = chainer.iterators.SOME_CLASS() # OK
```
However, to use SOME_CLASS in chainer/training/extensions, the following code does not work.
```importExtension.py
import chainer
some_class = chainer.training.extensions.SOME_CLASS() #NG
```
I feel some inconsistency here, because `chainer/__init__.py` contains both `import iterator` and `import iterators`, but `chainer/training/__init__.py` contains only `import extension` and not `import extensions`.
Is there any reason for this inconsistency?
* Conditions
- Chainer version
chainer v2.0.1
- OS/Platform
ubuntu 16.04
* Code to reproduce
```importModule.py
import chainer
iteratorExamples = chainer.iterators # OK
import chainer
extensionExamples = chainer.training.extensions #NG
```
* Error messages
```error.py
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'chainer.training' has no attribute 'extensions'
```
--- END ISSUE ---
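For context, a short sketch of the workaround that works today versus the access pattern the reporter expects; `LogReport` is used here purely as an illustrative extension.

```python
import chainer
from chainer.training import extensions  # explicit submodule import works today

# Works: the submodule has been imported explicitly.
report = extensions.LogReport()

# Expected to work for consistency with chainer.iterators, but currently
# raises AttributeError as shown in the traceback above:
# report = chainer.training.extensions.LogReport()
```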
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/training/__init__.py`
Content:
```
1 from chainer.training import extension # NOQA
2 from chainer.training import trainer # NOQA
3 from chainer.training import trigger # NOQA
4 from chainer.training import updater # NOQA
5
6
7 # import class and function
8 from chainer.training.extension import Extension # NOQA
9 from chainer.training.extension import make_extension # NOQA
10 from chainer.training.extension import PRIORITY_EDITOR # NOQA
11 from chainer.training.extension import PRIORITY_READER # NOQA
12 from chainer.training.extension import PRIORITY_WRITER # NOQA
13 from chainer.training.trainer import Trainer # NOQA
14 from chainer.training.trigger import get_trigger # NOQA
15 from chainer.training.trigger import IntervalTrigger # NOQA
16 from chainer.training.updater import ParallelUpdater # NOQA
17 from chainer.training.updater import StandardUpdater # NOQA
18 from chainer.training.updater import Updater # NOQA
19
```
Path: `chainer/training/extensions/parameter_statistics.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import reporter
5 from chainer import training
6 from chainer.training import extension
7
8
9 class ParameterStatistics(extension.Extension):
10 """Trainer extension to report parameter statistics.
11
12 Statistics are collected and reported for a given :class:`~chainer.Link`
13 or an iterable of :class:`~chainer.Link`s. If a link contains child links,
14 the statistics are reported separately for each child.
15
16 Any function that takes a one-dimensional :class:`numpy.ndarray` or a
17 :class:`cupy.ndarray` and outputs a single or multiple real numbers can be
18 registered to handle the collection of statistics, e.g.
19 :meth:`numpy.ndarray.mean`.
20
21 The keys of reported statistics follow the convention of link name
22 followed by parameter name, attribute name and function name, e.g.
23 ``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional
24 prefix and appended with integer indices if the statistics generating
25 function return multiple values.
26
27 Args:
28 links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing
29 the parameters to observe. The link is expected to have a ``name``
30 attribute which is used as a part of the report key.
31 statistics (dict): Dictionary with function name to function mappings.
32 The name is a string and is used as a part of the report key. The
33 function is responsible for generating the statistics.
34 report_params (bool): If ``True``, report statistics for parameter
35 values such as weights and biases.
36 report_grads (bool): If ``True``, report statistics for parameter
37 gradients.
38 prefix (str): Optional prefix to prepend to the report keys.
39 trigger: Trigger that decides when to aggregate the results and report
40 the values.
41 """
42 default_name = 'parameter_statistics'
43 priority = extension.PRIORITY_WRITER
44
45 # prefix ends with a '/' and param_name is preceded by a '/'
46 report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'
47 '{function_name}')
48
49 default_statistics = {
50 'mean': numpy.mean,
51 'std': numpy.std,
52 'min': numpy.min,
53 'max': numpy.max,
54 'zeros': lambda x: numpy.count_nonzero(x == 0),
55 'percentile': lambda x: numpy.percentile(x, (0.13, 2.28, 15.87,
56 50, 84.13, 97.72,
57 99.87))
58 }
59
60 def __init__(self, links, statistics=default_statistics,
61 report_params=True, report_grads=True, prefix=None,
62 trigger=(1, 'epoch')):
63
64 if not isinstance(links, (list, tuple)):
65 links = links,
66 self._links = links
67
68 self._statistics = statistics
69
70 attrs = []
71 if report_params:
72 attrs.append('data')
73 if report_grads:
74 attrs.append('grad')
75 self._attrs = attrs
76
77 self._prefix = prefix
78 self._trigger = training.trigger.get_trigger(trigger)
79 self._summary = reporter.DictSummary()
80
81 def __call__(self, trainer):
82 """Execute the statistics extension.
83
84 Collect statistics for the current state of parameters.
85
86 Note that this method will merely update its statistic summary, unless
87 the internal trigger is fired. If the trigger is fired, the summary
88 will also be reported and then reset for the next accumulation.
89
90 Args:
91 trainer (~chainer.training.Trainer): Associated trainer that
92 invoked this extension.
93 """
94 statistics = {}
95
96 for link in self._links:
97 link_name = getattr(link, 'name', 'None')
98 for param_name, param in link.namedparams():
99 for attr_name in self._attrs:
100 for function_name, function in \
101 six.iteritems(self._statistics):
102 # Get parameters as a flattend one-dimensional array
103 # since the statistics function should make no
104 # assumption about the axes
105 params = getattr(param, attr_name).ravel()
106 value = function(params)
107 key = self.report_key_template.format(
108 prefix=self._prefix + '/' if self._prefix else '',
109 link_name=link_name,
110 param_name=param_name,
111 attr_name=attr_name,
112 function_name=function_name
113 )
114 if hasattr(value, '__iter__'):
115 # Append integer indices to the keys if the
116 # statistic function return multiple values
117 statistics.update({'{}/{}'.format(key, i): v for
118 i, v in enumerate(value)})
119 else:
120 statistics[key] = value
121
122 self._summary.add(statistics)
123
124 if self._trigger(trainer):
125 reporter.report(self._summary.compute_mean())
126 self._summary = reporter.DictSummary() # Clear summary
127
128 def register_statistics(self, name, function):
129 """Register a function to compute a certain statistic.
130
131 The registered function will be called each time the extension runs and
132 the results will be included in the report.
133
134 Args:
135 name (str): Name of the statistic.
136 function: Function to generate the statistic. Any function that
137 takes a one-dimensional :class:`numpy.ndarray` or a
138 :class:`cupy.ndarray` and outputs a single or multiple real
139 numbers is allowed.
140 """
141 self._statistics[name] = function
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/training/__init__.py b/chainer/training/__init__.py
--- a/chainer/training/__init__.py
+++ b/chainer/training/__init__.py
@@ -1,7 +1,11 @@
from chainer.training import extension # NOQA
+from chainer.training import extensions # NOQA
from chainer.training import trainer # NOQA
from chainer.training import trigger # NOQA
+from chainer.training import triggers # NOQA
from chainer.training import updater # NOQA
+from chainer.training import updaters # NOQA
+from chainer.training import util # NOQA
# import class and function
diff --git a/chainer/training/extensions/parameter_statistics.py b/chainer/training/extensions/parameter_statistics.py
--- a/chainer/training/extensions/parameter_statistics.py
+++ b/chainer/training/extensions/parameter_statistics.py
@@ -2,8 +2,8 @@
import six
from chainer import reporter
-from chainer import training
from chainer.training import extension
+from chainer.training import trigger as trigger_module
class ParameterStatistics(extension.Extension):
@@ -75,7 +75,7 @@
self._attrs = attrs
self._prefix = prefix
- self._trigger = training.trigger.get_trigger(trigger)
+ self._trigger = trigger_module.get_trigger(trigger)
self._summary = reporter.DictSummary()
def __call__(self, trainer):
| {"golden_diff": "diff --git a/chainer/training/__init__.py b/chainer/training/__init__.py\n--- a/chainer/training/__init__.py\n+++ b/chainer/training/__init__.py\n@@ -1,7 +1,11 @@\n from chainer.training import extension # NOQA\n+from chainer.training import extensions # NOQA\n from chainer.training import trainer # NOQA\n from chainer.training import trigger # NOQA\n+from chainer.training import triggers # NOQA\n from chainer.training import updater # NOQA\n+from chainer.training import updaters # NOQA\n+from chainer.training import util # NOQA\n \n \n # import class and function\ndiff --git a/chainer/training/extensions/parameter_statistics.py b/chainer/training/extensions/parameter_statistics.py\n--- a/chainer/training/extensions/parameter_statistics.py\n+++ b/chainer/training/extensions/parameter_statistics.py\n@@ -2,8 +2,8 @@\n import six\n \n from chainer import reporter\n-from chainer import training\n from chainer.training import extension\n+from chainer.training import trigger as trigger_module\n \n \n class ParameterStatistics(extension.Extension):\n@@ -75,7 +75,7 @@\n self._attrs = attrs\n \n self._prefix = prefix\n- self._trigger = training.trigger.get_trigger(trigger)\n+ self._trigger = trigger_module.get_trigger(trigger)\n self._summary = reporter.DictSummary()\n \n def __call__(self, trainer):\n", "issue": "inconsistency to use example module\nI found some inconsistent behavior to import classes.\r\n\r\nTo use SOME_CLASS in chainer/iterators, the following code works.\r\n```importIterator.py\r\nimport chainer\r\nsome_class = chainer.iterators.SOME_CLASS() # OK\r\n```\r\nHowever, to use SOME_CLASS in chainer/training/extensions, the following code does not work.\r\n```importExtension.py\r\nimport chainer\r\nsome_class = chainer.training.extensions.SOME_CLASS() #NG\r\n```\r\n\r\nI feel some inconsistency here, because `chainer/__init__.py` contains both of `import iterator` and `import iterators` but `chaner/training/__init__.py` contains only `import extension` and not `import extensions`.\r\n\r\nIs there any reason about this inconsistency?\r\n\r\n* Conditions\r\n - Chainer version\r\n chainer v2.0.1\r\n - OS/Platform\r\n ubuntu 16.04\r\n* Code to reproduce\r\n```importModule.py\r\nimport chainer\r\niteratorExamples = chainer.iterators # OK\r\n\r\nimport chainer\r\nextensionExamples = chainer.training.extensions #NG\r\n```\r\n\r\n* Error messages\r\n```error.py\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nAttributeError: module 'chainer.training' has no attribute 'extensions'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "from chainer.training import extension # NOQA\nfrom chainer.training import trainer # NOQA\nfrom chainer.training import trigger # NOQA\nfrom chainer.training import updater # NOQA\n\n\n# import class and function\nfrom chainer.training.extension import Extension # NOQA\nfrom chainer.training.extension import make_extension # NOQA\nfrom chainer.training.extension import PRIORITY_EDITOR # NOQA\nfrom chainer.training.extension import PRIORITY_READER # NOQA\nfrom chainer.training.extension import PRIORITY_WRITER # NOQA\nfrom chainer.training.trainer import Trainer # NOQA\nfrom chainer.training.trigger import get_trigger # NOQA\nfrom chainer.training.trigger import IntervalTrigger # NOQA\nfrom chainer.training.updater import ParallelUpdater # NOQA\nfrom chainer.training.updater import StandardUpdater # NOQA\nfrom chainer.training.updater import Updater # NOQA\n", "path": "chainer/training/__init__.py"}, {"content": 
"import numpy\nimport six\n\nfrom chainer import reporter\nfrom chainer import training\nfrom chainer.training import extension\n\n\nclass ParameterStatistics(extension.Extension):\n \"\"\"Trainer extension to report parameter statistics.\n\n Statistics are collected and reported for a given :class:`~chainer.Link`\n or an iterable of :class:`~chainer.Link`s. If a link contains child links,\n the statistics are reported separately for each child.\n\n Any function that takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real numbers can be\n registered to handle the collection of statistics, e.g.\n :meth:`numpy.ndarray.mean`.\n\n The keys of reported statistics follow the convention of link name\n followed by parameter name, attribute name and function name, e.g.\n ``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional\n prefix and appended with integer indices if the statistics generating\n function return multiple values.\n\n Args:\n links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing\n the parameters to observe. The link is expected to have a ``name``\n attribute which is used as a part of the report key.\n statistics (dict): Dictionary with function name to function mappings.\n The name is a string and is used as a part of the report key. The\n function is responsible for generating the statistics.\n report_params (bool): If ``True``, report statistics for parameter\n values such as weights and biases.\n report_grads (bool): If ``True``, report statistics for parameter\n gradients.\n prefix (str): Optional prefix to prepend to the report keys.\n trigger: Trigger that decides when to aggregate the results and report\n the values.\n \"\"\"\n default_name = 'parameter_statistics'\n priority = extension.PRIORITY_WRITER\n\n # prefix ends with a '/' and param_name is preceded by a '/'\n report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'\n '{function_name}')\n\n default_statistics = {\n 'mean': numpy.mean,\n 'std': numpy.std,\n 'min': numpy.min,\n 'max': numpy.max,\n 'zeros': lambda x: numpy.count_nonzero(x == 0),\n 'percentile': lambda x: numpy.percentile(x, (0.13, 2.28, 15.87,\n 50, 84.13, 97.72,\n 99.87))\n }\n\n def __init__(self, links, statistics=default_statistics,\n report_params=True, report_grads=True, prefix=None,\n trigger=(1, 'epoch')):\n\n if not isinstance(links, (list, tuple)):\n links = links,\n self._links = links\n\n self._statistics = statistics\n\n attrs = []\n if report_params:\n attrs.append('data')\n if report_grads:\n attrs.append('grad')\n self._attrs = attrs\n\n self._prefix = prefix\n self._trigger = training.trigger.get_trigger(trigger)\n self._summary = reporter.DictSummary()\n\n def __call__(self, trainer):\n \"\"\"Execute the statistics extension.\n\n Collect statistics for the current state of parameters.\n\n Note that this method will merely update its statistic summary, unless\n the internal trigger is fired. 
If the trigger is fired, the summary\n will also be reported and then reset for the next accumulation.\n\n Args:\n trainer (~chainer.training.Trainer): Associated trainer that\n invoked this extension.\n \"\"\"\n statistics = {}\n\n for link in self._links:\n link_name = getattr(link, 'name', 'None')\n for param_name, param in link.namedparams():\n for attr_name in self._attrs:\n for function_name, function in \\\n six.iteritems(self._statistics):\n # Get parameters as a flattend one-dimensional array\n # since the statistics function should make no\n # assumption about the axes\n params = getattr(param, attr_name).ravel()\n value = function(params)\n key = self.report_key_template.format(\n prefix=self._prefix + '/' if self._prefix else '',\n link_name=link_name,\n param_name=param_name,\n attr_name=attr_name,\n function_name=function_name\n )\n if hasattr(value, '__iter__'):\n # Append integer indices to the keys if the\n # statistic function return multiple values\n statistics.update({'{}/{}'.format(key, i): v for\n i, v in enumerate(value)})\n else:\n statistics[key] = value\n\n self._summary.add(statistics)\n\n if self._trigger(trainer):\n reporter.report(self._summary.compute_mean())\n self._summary = reporter.DictSummary() # Clear summary\n\n def register_statistics(self, name, function):\n \"\"\"Register a function to compute a certain statistic.\n\n The registered function will be called each time the extension runs and\n the results will be included in the report.\n\n Args:\n name (str): Name of the statistic.\n function: Function to generate the statistic. Any function that\n takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real\n numbers is allowed.\n \"\"\"\n self._statistics[name] = function\n", "path": "chainer/training/extensions/parameter_statistics.py"}], "after_files": [{"content": "from chainer.training import extension # NOQA\nfrom chainer.training import extensions # NOQA\nfrom chainer.training import trainer # NOQA\nfrom chainer.training import trigger # NOQA\nfrom chainer.training import triggers # NOQA\nfrom chainer.training import updater # NOQA\nfrom chainer.training import updaters # NOQA\nfrom chainer.training import util # NOQA\n\n\n# import class and function\nfrom chainer.training.extension import Extension # NOQA\nfrom chainer.training.extension import make_extension # NOQA\nfrom chainer.training.extension import PRIORITY_EDITOR # NOQA\nfrom chainer.training.extension import PRIORITY_READER # NOQA\nfrom chainer.training.extension import PRIORITY_WRITER # NOQA\nfrom chainer.training.trainer import Trainer # NOQA\nfrom chainer.training.trigger import get_trigger # NOQA\nfrom chainer.training.trigger import IntervalTrigger # NOQA\nfrom chainer.training.updater import ParallelUpdater # NOQA\nfrom chainer.training.updater import StandardUpdater # NOQA\nfrom chainer.training.updater import Updater # NOQA\n", "path": "chainer/training/__init__.py"}, {"content": "import numpy\nimport six\n\nfrom chainer import reporter\nfrom chainer.training import extension\nfrom chainer.training import trigger as trigger_module\n\n\nclass ParameterStatistics(extension.Extension):\n \"\"\"Trainer extension to report parameter statistics.\n\n Statistics are collected and reported for a given :class:`~chainer.Link`\n or an iterable of :class:`~chainer.Link`s. 
If a link contains child links,\n the statistics are reported separately for each child.\n\n Any function that takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real numbers can be\n registered to handle the collection of statistics, e.g.\n :meth:`numpy.ndarray.mean`.\n\n The keys of reported statistics follow the convention of link name\n followed by parameter name, attribute name and function name, e.g.\n ``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional\n prefix and appended with integer indices if the statistics generating\n function return multiple values.\n\n Args:\n links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing\n the parameters to observe. The link is expected to have a ``name``\n attribute which is used as a part of the report key.\n statistics (dict): Dictionary with function name to function mappings.\n The name is a string and is used as a part of the report key. The\n function is responsible for generating the statistics.\n report_params (bool): If ``True``, report statistics for parameter\n values such as weights and biases.\n report_grads (bool): If ``True``, report statistics for parameter\n gradients.\n prefix (str): Optional prefix to prepend to the report keys.\n trigger: Trigger that decides when to aggregate the results and report\n the values.\n \"\"\"\n default_name = 'parameter_statistics'\n priority = extension.PRIORITY_WRITER\n\n # prefix ends with a '/' and param_name is preceded by a '/'\n report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'\n '{function_name}')\n\n default_statistics = {\n 'mean': numpy.mean,\n 'std': numpy.std,\n 'min': numpy.min,\n 'max': numpy.max,\n 'zeros': lambda x: numpy.count_nonzero(x == 0),\n 'percentile': lambda x: numpy.percentile(x, (0.13, 2.28, 15.87,\n 50, 84.13, 97.72,\n 99.87))\n }\n\n def __init__(self, links, statistics=default_statistics,\n report_params=True, report_grads=True, prefix=None,\n trigger=(1, 'epoch')):\n\n if not isinstance(links, (list, tuple)):\n links = links,\n self._links = links\n\n self._statistics = statistics\n\n attrs = []\n if report_params:\n attrs.append('data')\n if report_grads:\n attrs.append('grad')\n self._attrs = attrs\n\n self._prefix = prefix\n self._trigger = trigger_module.get_trigger(trigger)\n self._summary = reporter.DictSummary()\n\n def __call__(self, trainer):\n \"\"\"Execute the statistics extension.\n\n Collect statistics for the current state of parameters.\n\n Note that this method will merely update its statistic summary, unless\n the internal trigger is fired. 
If the trigger is fired, the summary\n will also be reported and then reset for the next accumulation.\n\n Args:\n trainer (~chainer.training.Trainer): Associated trainer that\n invoked this extension.\n \"\"\"\n statistics = {}\n\n for link in self._links:\n link_name = getattr(link, 'name', 'None')\n for param_name, param in link.namedparams():\n for attr_name in self._attrs:\n for function_name, function in \\\n six.iteritems(self._statistics):\n # Get parameters as a flattend one-dimensional array\n # since the statistics function should make no\n # assumption about the axes\n params = getattr(param, attr_name).ravel()\n value = function(params)\n key = self.report_key_template.format(\n prefix=self._prefix + '/' if self._prefix else '',\n link_name=link_name,\n param_name=param_name,\n attr_name=attr_name,\n function_name=function_name\n )\n if hasattr(value, '__iter__'):\n # Append integer indices to the keys if the\n # statistic function return multiple values\n statistics.update({'{}/{}'.format(key, i): v for\n i, v in enumerate(value)})\n else:\n statistics[key] = value\n\n self._summary.add(statistics)\n\n if self._trigger(trainer):\n reporter.report(self._summary.compute_mean())\n self._summary = reporter.DictSummary() # Clear summary\n\n def register_statistics(self, name, function):\n \"\"\"Register a function to compute a certain statistic.\n\n The registered function will be called each time the extension runs and\n the results will be included in the report.\n\n Args:\n name (str): Name of the statistic.\n function: Function to generate the statistic. Any function that\n takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real\n numbers is allowed.\n \"\"\"\n self._statistics[name] = function\n", "path": "chainer/training/extensions/parameter_statistics.py"}]} | 2,281 | 323 |
gh_patches_debug_27870 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undocumented behaviour of sni kwarg of remote()
In `remote()`, the 'server_hostname' SSL argument can be set like this:
```python
remote('172.22.0.2', 9090, ssl=True, sni='example.com')
```
This behaviour of the `sni` kwarg is undocumented. Currently only the behaviour of passing a bool is documented.
--- END ISSUE ---
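For illustration, a sketch of the three behaviours the quoted `remote.__init__` code below implies for `sni` (not authoritative documentation; the calls would attempt real connections):

```python
from pwn import remote  # pwntools

# Default sni=True: server_hostname is derived from the host argument.
r1 = remote('example.com', 443, ssl=True)

# Undocumented case, sni as a string: server_hostname is set to that string.
r2 = remote('172.22.0.2', 9090, ssl=True, sni='example.com')

# sni=False: no server_hostname is passed to the SSL context wrapper.
r3 = remote('172.22.0.2', 9090, ssl=True, sni=False)
```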
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/tubes/remote.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import socket
5 import socks
6
7 from pwnlib.log import getLogger
8 from pwnlib.timeout import Timeout
9 from pwnlib.tubes.sock import sock
10
11 log = getLogger(__name__)
12
13 class remote(sock):
14 r"""Creates a TCP or UDP-connection to a remote host. It supports
15 both IPv4 and IPv6.
16
17 The returned object supports all the methods from
18 :class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
19
20 Arguments:
21 host(str): The host to connect to.
22 port(int): The port to connect to.
23 fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
24 typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
25 timeout: A positive number, None or the string "default".
26 ssl(bool): Wrap the socket with SSL
27 ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.
28 sni: Set 'server_hostname' in ssl_args based on the host parameter.
29 sock(socket.socket): Socket to inherit, rather than connecting
30 ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.
31
32 Examples:
33
34 >>> r = remote('google.com', 443, ssl=True)
35 >>> r.send(b'GET /\r\n\r\n')
36 >>> r.recvn(4)
37 b'HTTP'
38
39 If a connection cannot be made, an exception is raised.
40
41 >>> r = remote('127.0.0.1', 1)
42 Traceback (most recent call last):
43 ...
44 PwnlibException: Could not connect to 127.0.0.1 on port 1
45
46 You can also use :meth:`.remote.fromsocket` to wrap an existing socket.
47
48 >>> import socket
49 >>> s = socket.socket()
50 >>> s.connect(('google.com', 80))
51 >>> s.send(b'GET /' + b'\r\n'*2)
52 9
53 >>> r = remote.fromsocket(s)
54 >>> r.recvn(4)
55 b'HTTP'
56 """
57
58 def __init__(self, host, port,
59 fam = "any", typ = "tcp",
60 ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,
61 *args, **kwargs):
62 super(remote, self).__init__(*args, **kwargs)
63
64 # convert port to string for sagemath support
65 self.rport = str(port)
66 self.rhost = host
67
68 if sock:
69 self.family = sock.family
70 self.type = sock.type
71 self.proto = sock.proto
72 self.sock = sock
73
74 else:
75 typ = self._get_type(typ)
76 fam = self._get_family(fam)
77 try:
78 self.sock = self._connect(fam, typ)
79 except socket.gaierror as e:
80 if e.errno != socket.EAI_NONAME:
81 raise
82 self.error('Could not resolve hostname: %r', host)
83 if self.sock:
84 self.settimeout(self.timeout)
85 self.lhost, self.lport = self.sock.getsockname()[:2]
86
87 if ssl:
88 # Deferred import to save startup time
89 import ssl as _ssl
90
91 ssl_args = ssl_args or {}
92 ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)
93 if isinstance(sni, str):
94 ssl_args["server_hostname"] = sni
95 elif sni:
96 ssl_args["server_hostname"] = host
97 self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)
98
99 def _connect(self, fam, typ):
100 sock = None
101 timeout = self.timeout
102
103 with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:
104 for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):
105 self.family, self.type, self.proto, _canonname, sockaddr = res
106
107 if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
108 continue
109
110 h.status("Trying %s", sockaddr[0])
111
112 sock = socket.socket(self.family, self.type, self.proto)
113
114 if timeout is not None and timeout <= 0:
115 sock.setblocking(0)
116 else:
117 sock.setblocking(1)
118 sock.settimeout(timeout)
119
120 try:
121 sock.connect(sockaddr)
122 return sock
123 except socks.ProxyError:
124 raise
125 except socket.error:
126 pass
127 self.error("Could not connect to %s on port %s", self.rhost, self.rport)
128
129 @classmethod
130 def fromsocket(cls, socket):
131 """
132 Helper method to wrap a standard python socket.socket with the
133 tube APIs.
134
135 Arguments:
136 socket: Instance of socket.socket
137
138 Returns:
139 Instance of pwnlib.tubes.remote.remote.
140 """
141 s = socket
142 host, port = s.getpeername()
143 return remote(host, port, fam=s.family, typ=s.type, sock=s)
144
145 class tcp(remote):
146 __doc__ = remote.__doc__
147 def __init__(self, host, port, *a, **kw):
148 return super(tcp, self).__init__(host, port, typ="tcp", *a, **kw)
149
150 class udp(remote):
151 __doc__ = remote.__doc__
152 def __init__(self, host, port, *a, **kw):
153 return super(udp, self).__init__(host, port, typ="udp", *a, **kw)
154
155 class connect(remote):
156 __doc__ = remote.__doc__
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/tubes/remote.py b/pwnlib/tubes/remote.py
--- a/pwnlib/tubes/remote.py
+++ b/pwnlib/tubes/remote.py
@@ -23,11 +23,11 @@
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
timeout: A positive number, None or the string "default".
+ sock(:class:`socket.socket`): Socket to inherit, rather than connecting
ssl(bool): Wrap the socket with SSL
ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.
- sni: Set 'server_hostname' in ssl_args based on the host parameter.
- sock(socket.socket): Socket to inherit, rather than connecting
- ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.
+ ssl_args(dict): Pass :func:`ssl.wrap_socket` named arguments in a dictionary.
+ sni(str,bool): Set 'server_hostname' in ssl_args. Set to True to set it based on the host argument. Set to False to not provide any value. Default is True.
Examples:
@@ -57,7 +57,7 @@
def __init__(self, host, port,
fam = "any", typ = "tcp",
- ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,
+ sock=None, ssl=False, ssl_context=None, ssl_args=None, sni=True,
*args, **kwargs):
super(remote, self).__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/pwnlib/tubes/remote.py b/pwnlib/tubes/remote.py\n--- a/pwnlib/tubes/remote.py\n+++ b/pwnlib/tubes/remote.py\n@@ -23,11 +23,11 @@\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n timeout: A positive number, None or the string \"default\".\n+ sock(:class:`socket.socket`): Socket to inherit, rather than connecting\n ssl(bool): Wrap the socket with SSL\n ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.\n- sni: Set 'server_hostname' in ssl_args based on the host parameter.\n- sock(socket.socket): Socket to inherit, rather than connecting\n- ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.\n+ ssl_args(dict): Pass :func:`ssl.wrap_socket` named arguments in a dictionary.\n+ sni(str,bool): Set 'server_hostname' in ssl_args. Set to True to set it based on the host argument. Set to False to not provide any value. Default is True.\n \n Examples:\n \n@@ -57,7 +57,7 @@\n \n def __init__(self, host, port,\n fam = \"any\", typ = \"tcp\",\n- ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,\n+ sock=None, ssl=False, ssl_context=None, ssl_args=None, sni=True,\n *args, **kwargs):\n super(remote, self).__init__(*args, **kwargs)\n", "issue": "Undocumented behaviour of sni kwarg of remote()\nIn `remote()` 'server_hostname' ssl arg can be set like this:\r\n```python\r\nremote('172.22.0.2', 9090, ssl=True, sni='example.com')\r\n```\r\nThis behaviour of `sni` kwarg is undocumented. Currently only behaviour of passing a bool in documented.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport socket\nimport socks\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.sock import sock\n\nlog = getLogger(__name__)\n\nclass remote(sock):\n r\"\"\"Creates a TCP or UDP-connection to a remote host. 
It supports\n both IPv4 and IPv6.\n\n The returned object supports all the methods from\n :class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.\n\n Arguments:\n host(str): The host to connect to.\n port(int): The port to connect to.\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n timeout: A positive number, None or the string \"default\".\n ssl(bool): Wrap the socket with SSL\n ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.\n sni: Set 'server_hostname' in ssl_args based on the host parameter.\n sock(socket.socket): Socket to inherit, rather than connecting\n ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.\n\n Examples:\n\n >>> r = remote('google.com', 443, ssl=True)\n >>> r.send(b'GET /\\r\\n\\r\\n')\n >>> r.recvn(4)\n b'HTTP'\n\n If a connection cannot be made, an exception is raised.\n\n >>> r = remote('127.0.0.1', 1)\n Traceback (most recent call last):\n ...\n PwnlibException: Could not connect to 127.0.0.1 on port 1\n\n You can also use :meth:`.remote.fromsocket` to wrap an existing socket.\n\n >>> import socket\n >>> s = socket.socket()\n >>> s.connect(('google.com', 80))\n >>> s.send(b'GET /' + b'\\r\\n'*2)\n 9\n >>> r = remote.fromsocket(s)\n >>> r.recvn(4)\n b'HTTP'\n \"\"\"\n\n def __init__(self, host, port,\n fam = \"any\", typ = \"tcp\",\n ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,\n *args, **kwargs):\n super(remote, self).__init__(*args, **kwargs)\n\n # convert port to string for sagemath support\n self.rport = str(port)\n self.rhost = host\n\n if sock:\n self.family = sock.family\n self.type = sock.type\n self.proto = sock.proto\n self.sock = sock\n\n else:\n typ = self._get_type(typ)\n fam = self._get_family(fam)\n try:\n self.sock = self._connect(fam, typ)\n except socket.gaierror as e:\n if e.errno != socket.EAI_NONAME:\n raise\n self.error('Could not resolve hostname: %r', host)\n if self.sock:\n self.settimeout(self.timeout)\n self.lhost, self.lport = self.sock.getsockname()[:2]\n\n if ssl:\n # Deferred import to save startup time\n import ssl as _ssl\n\n ssl_args = ssl_args or {}\n ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)\n if isinstance(sni, str):\n ssl_args[\"server_hostname\"] = sni\n elif sni:\n ssl_args[\"server_hostname\"] = host\n self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)\n\n def _connect(self, fam, typ):\n sock = None\n timeout = self.timeout\n\n with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:\n for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):\n self.family, self.type, self.proto, _canonname, sockaddr = res\n\n if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:\n continue\n\n h.status(\"Trying %s\", sockaddr[0])\n\n sock = socket.socket(self.family, self.type, self.proto)\n\n if timeout is not None and timeout <= 0:\n sock.setblocking(0)\n else:\n sock.setblocking(1)\n sock.settimeout(timeout)\n\n try:\n sock.connect(sockaddr)\n return sock\n except socks.ProxyError:\n raise\n except socket.error:\n pass\n self.error(\"Could not connect to %s on port %s\", self.rhost, self.rport)\n\n @classmethod\n def fromsocket(cls, socket):\n \"\"\"\n Helper method to wrap a standard python socket.socket with the\n tube APIs.\n\n Arguments:\n socket: Instance of socket.socket\n\n Returns:\n Instance of 
pwnlib.tubes.remote.remote.\n \"\"\"\n s = socket\n host, port = s.getpeername()\n return remote(host, port, fam=s.family, typ=s.type, sock=s)\n\nclass tcp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(tcp, self).__init__(host, port, typ=\"tcp\", *a, **kw)\n\nclass udp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(udp, self).__init__(host, port, typ=\"udp\", *a, **kw)\n\nclass connect(remote):\n __doc__ = remote.__doc__\n", "path": "pwnlib/tubes/remote.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport socket\nimport socks\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.sock import sock\n\nlog = getLogger(__name__)\n\nclass remote(sock):\n r\"\"\"Creates a TCP or UDP-connection to a remote host. It supports\n both IPv4 and IPv6.\n\n The returned object supports all the methods from\n :class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.\n\n Arguments:\n host(str): The host to connect to.\n port(int): The port to connect to.\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n timeout: A positive number, None or the string \"default\".\n sock(:class:`socket.socket`): Socket to inherit, rather than connecting\n ssl(bool): Wrap the socket with SSL\n ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.\n ssl_args(dict): Pass :func:`ssl.wrap_socket` named arguments in a dictionary.\n sni(str,bool): Set 'server_hostname' in ssl_args. Set to True to set it based on the host argument. Set to False to not provide any value. 
Default is True.\n\n Examples:\n\n >>> r = remote('google.com', 443, ssl=True)\n >>> r.send(b'GET /\\r\\n\\r\\n')\n >>> r.recvn(4)\n b'HTTP'\n\n If a connection cannot be made, an exception is raised.\n\n >>> r = remote('127.0.0.1', 1)\n Traceback (most recent call last):\n ...\n PwnlibException: Could not connect to 127.0.0.1 on port 1\n\n You can also use :meth:`.remote.fromsocket` to wrap an existing socket.\n\n >>> import socket\n >>> s = socket.socket()\n >>> s.connect(('google.com', 80))\n >>> s.send(b'GET /' + b'\\r\\n'*2)\n 9\n >>> r = remote.fromsocket(s)\n >>> r.recvn(4)\n b'HTTP'\n \"\"\"\n\n def __init__(self, host, port,\n fam = \"any\", typ = \"tcp\",\n sock=None, ssl=False, ssl_context=None, ssl_args=None, sni=True,\n *args, **kwargs):\n super(remote, self).__init__(*args, **kwargs)\n\n # convert port to string for sagemath support\n self.rport = str(port)\n self.rhost = host\n\n if sock:\n self.family = sock.family\n self.type = sock.type\n self.proto = sock.proto\n self.sock = sock\n\n else:\n typ = self._get_type(typ)\n fam = self._get_family(fam)\n try:\n self.sock = self._connect(fam, typ)\n except socket.gaierror as e:\n if e.errno != socket.EAI_NONAME:\n raise\n self.error('Could not resolve hostname: %r', host)\n if self.sock:\n self.settimeout(self.timeout)\n self.lhost, self.lport = self.sock.getsockname()[:2]\n\n if ssl:\n # Deferred import to save startup time\n import ssl as _ssl\n\n ssl_args = ssl_args or {}\n ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)\n if isinstance(sni, str):\n ssl_args[\"server_hostname\"] = sni\n elif sni:\n ssl_args[\"server_hostname\"] = host\n self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)\n\n def _connect(self, fam, typ):\n sock = None\n timeout = self.timeout\n\n with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:\n for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):\n self.family, self.type, self.proto, _canonname, sockaddr = res\n\n if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:\n continue\n\n h.status(\"Trying %s\", sockaddr[0])\n\n sock = socket.socket(self.family, self.type, self.proto)\n\n if timeout is not None and timeout <= 0:\n sock.setblocking(0)\n else:\n sock.setblocking(1)\n sock.settimeout(timeout)\n\n try:\n sock.connect(sockaddr)\n return sock\n except socks.ProxyError:\n raise\n except socket.error:\n pass\n self.error(\"Could not connect to %s on port %s\", self.rhost, self.rport)\n\n @classmethod\n def fromsocket(cls, socket):\n \"\"\"\n Helper method to wrap a standard python socket.socket with the\n tube APIs.\n\n Arguments:\n socket: Instance of socket.socket\n\n Returns:\n Instance of pwnlib.tubes.remote.remote.\n \"\"\"\n s = socket\n host, port = s.getpeername()\n return remote(host, port, fam=s.family, typ=s.type, sock=s)\n\nclass tcp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(tcp, self).__init__(host, port, typ=\"tcp\", *a, **kw)\n\nclass udp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(udp, self).__init__(host, port, typ=\"udp\", *a, **kw)\n\nclass connect(remote):\n __doc__ = remote.__doc__\n", "path": "pwnlib/tubes/remote.py"}]} | 1,993 | 385 |
gh_patches_debug_32274 | rasdani/github-patches | git_diff | getredash__redash-2069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The API key for one query may be used to retrieve another query's results
### Issue Summary
A query's API key may be used to obtain another query's results via the REST API when the API key is included in the query string.
### Steps to Reproduce
1. Create one query and execute it to obtain results (call it query A)
2. Create another query and execute it to obtain different results (call it query B)
3. Get the query's API key for query A (A_API_KEY) and the query number for query A (A_QUERY_NUMBER)
4. Get the result number from query B's most recent run (B_RESULT_NUMBER)
5. Execute the below code and you'll see that the API key for query A can get results from query B
```bash
REDASH_DOMAIN='yourdomain.com'
A_QUERY_NUMBER='query number for query A'
A_API_KEY='api key for query A'
B_RESULT_NUMBER='query result number for query b'
# this will download query B's results using query A's access key
wget \
-O query_b_results.csv \
"https://$REDASH_DOMAIN/api/queries/$A_QUERY_NUMBER/results/$B_RESULT_NUMBER.csv?api_key=$A_API_KEY"
```
This is a bug because one query's API key should NOT be able to access another query's results.
### Technical details:
* Redash Version: 1.0.3
* Browser/OS: (Command Line) / Linux Mint 18.2
* How did you install Redash: Command line
--- END ISSUE ---
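For anyone verifying this against their own instance, the wget reproduction above translates directly to Python. The following is a placeholder-filled sketch (all values are stand-ins, and it assumes the `requests` package is available):

```python
# Python equivalent of the wget reproduction above; every value is a placeholder.
import requests

REDASH_DOMAIN = "yourdomain.com"
A_QUERY_NUMBER = "123"         # id of query A
A_API_KEY = "query-A-api-key"  # API key scoped to query A
B_RESULT_NUMBER = "456"        # id of a result produced by query B

url = ("https://{domain}/api/queries/{query}/results/{result}.csv"
       .format(domain=REDASH_DOMAIN, query=A_QUERY_NUMBER, result=B_RESULT_NUMBER))
response = requests.get(url, params={"api_key": A_API_KEY})

# On an affected instance this returns 200 with query B's rows; a fixed
# instance should refuse to serve a result that query A did not produce.
print(response.status_code)
print(response.text[:200])
```
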
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/query_results.py`
Content:
```
1 import logging
2 import json
3 import time
4
5 import pystache
6 from flask import make_response, request
7 from flask_login import current_user
8 from flask_restful import abort
9 from redash import models, settings, utils
10 from redash.tasks import QueryTask, record_event
11 from redash.permissions import require_permission, not_view_only, has_access, require_access, view_only
12 from redash.handlers.base import BaseResource, get_object_or_404
13 from redash.utils import collect_query_parameters, collect_parameters_from_request, gen_query_hash
14 from redash.tasks.queries import enqueue_query
15
16
17 def error_response(message):
18 return {'job': {'status': 4, 'error': message}}, 400
19
20
21 #
22 # Run a parameterized query synchronously and return the result
23 # DISCLAIMER: Temporary solution to support parameters in queries. Should be
24 # removed once we refactor the query results API endpoints and handling
25 # on the client side. Please don't reuse in other API handlers.
26 #
27 def run_query_sync(data_source, parameter_values, query_text, max_age=0):
28 query_parameters = set(collect_query_parameters(query_text))
29 missing_params = set(query_parameters) - set(parameter_values.keys())
30 if missing_params:
31 raise Exception('Missing parameter value for: {}'.format(", ".join(missing_params)))
32
33 if query_parameters:
34 query_text = pystache.render(query_text, parameter_values)
35
36 if max_age <= 0:
37 query_result = None
38 else:
39 query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
40
41 query_hash = gen_query_hash(query_text)
42
43 if query_result:
44 logging.info("Returning cached result for query %s" % query_hash)
45 return query_result
46
47 try:
48 started_at = time.time()
49 data, error = data_source.query_runner.run_query(query_text, current_user)
50
51 if error:
52 logging.info('got bak error')
53 logging.info(error)
54 return None
55
56 run_time = time.time() - started_at
57 query_result, updated_query_ids = models.QueryResult.store_result(data_source.org, data_source,
58 query_hash, query_text, data,
59 run_time, utils.utcnow())
60
61 models.db.session.commit()
62 return query_result
63 except Exception as e:
64 if max_age > 0:
65 abort(404, message="Unable to get result from the database, and no cached query result found.")
66 else:
67 abort(503, message="Unable to get result from the database.")
68 return None
69
70 def run_query(data_source, parameter_values, query_text, query_id, max_age=0):
71 query_parameters = set(collect_query_parameters(query_text))
72 missing_params = set(query_parameters) - set(parameter_values.keys())
73 if missing_params:
74 return error_response('Missing parameter value for: {}'.format(", ".join(missing_params)))
75
76 if data_source.paused:
77 if data_source.pause_reason:
78 message = '{} is paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)
79 else:
80 message = '{} is paused. Please try later.'.format(data_source.name)
81
82 return error_response(message)
83
84 if query_parameters:
85 query_text = pystache.render(query_text, parameter_values)
86
87 if max_age == 0:
88 query_result = None
89 else:
90 query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
91
92 if query_result:
93 return {'query_result': query_result.to_dict()}
94 else:
95 job = enqueue_query(query_text, data_source, current_user.id, metadata={"Username": current_user.email, "Query ID": query_id})
96 return {'job': job.to_dict()}
97
98
99 class QueryResultListResource(BaseResource):
100 @require_permission('execute_query')
101 def post(self):
102 """
103 Execute a query (or retrieve recent results).
104
105 :qparam string query: The query text to execute
106 :qparam number query_id: The query object to update with the result (optional)
107 :qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute
108 :qparam number data_source_id: ID of data source to query
109 """
110 params = request.get_json(force=True)
111 parameter_values = collect_parameters_from_request(request.args)
112
113 query = params['query']
114 max_age = int(params.get('max_age', -1))
115 query_id = params.get('query_id', 'adhoc')
116
117 data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)
118
119 if not has_access(data_source.groups, self.current_user, not_view_only):
120 return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403
121
122 self.record_event({
123 'action': 'execute_query',
124 'timestamp': int(time.time()),
125 'object_id': data_source.id,
126 'object_type': 'data_source',
127 'query': query
128 })
129 return run_query(data_source, parameter_values, query, query_id, max_age)
130
131
132 ONE_YEAR = 60 * 60 * 24 * 365.25
133
134
135 class QueryResultResource(BaseResource):
136 @staticmethod
137 def add_cors_headers(headers):
138 if 'Origin' in request.headers:
139 origin = request.headers['Origin']
140
141 if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:
142 headers['Access-Control-Allow-Origin'] = origin
143 headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()
144
145 @require_permission('view_query')
146 def options(self, query_id=None, query_result_id=None, filetype='json'):
147 headers = {}
148 self.add_cors_headers(headers)
149
150 if settings.ACCESS_CONTROL_REQUEST_METHOD:
151 headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD
152
153 if settings.ACCESS_CONTROL_ALLOW_HEADERS:
154 headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS
155
156 return make_response("", 200, headers)
157
158 @require_permission('view_query')
159 def get(self, query_id=None, query_result_id=None, filetype='json'):
160 """
161 Retrieve query results.
162
163 :param number query_id: The ID of the query whose results should be fetched
164 :param number query_result_id: the ID of the query result to fetch
165 :param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. Defaults to 'json'.
166
167 :<json number id: Query result ID
168 :<json string query: Query that produced this result
169 :<json string query_hash: Hash code for query text
170 :<json object data: Query output
171 :<json number data_source_id: ID of data source that produced this result
172 :<json number runtime: Length of execution time in seconds
173 :<json string retrieved_at: Query retrieval date/time, in ISO format
174 """
175 # TODO:
176 # This method handles two cases: retrieving result by id & retrieving result by query id.
177 # They need to be split, as they have different logic (for example, retrieving by query id
178 # should check for query parameters and shouldn't cache the result).
179 should_cache = query_result_id is not None
180
181 parameter_values = collect_parameters_from_request(request.args)
182 max_age = int(request.args.get('maxAge', 0))
183
184 query_result = None
185
186 if query_result_id:
187 query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
188 elif query_id is not None:
189 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
190
191 if query is not None:
192 if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:
193 query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)
194 elif query.latest_query_data_id is not None:
195 query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)
196
197 if query_result:
198 require_access(query_result.data_source.groups, self.current_user, view_only)
199
200 if isinstance(self.current_user, models.ApiUser):
201 event = {
202 'user_id': None,
203 'org_id': self.current_org.id,
204 'action': 'api_get',
205 'timestamp': int(time.time()),
206 'api_key': self.current_user.name,
207 'file_type': filetype,
208 'user_agent': request.user_agent.string,
209 'ip': request.remote_addr
210 }
211
212 if query_id:
213 event['object_type'] = 'query'
214 event['object_id'] = query_id
215 else:
216 event['object_type'] = 'query_result'
217 event['object_id'] = query_result_id
218
219 record_event.delay(event)
220
221 if filetype == 'json':
222 response = self.make_json_response(query_result)
223 elif filetype == 'xlsx':
224 response = self.make_excel_response(query_result)
225 else:
226 response = self.make_csv_response(query_result)
227
228 if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:
229 self.add_cors_headers(response.headers)
230
231 if should_cache:
232 response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)
233
234 return response
235
236 else:
237 abort(404, message='No cached result found for this query.')
238
239 def make_json_response(self, query_result):
240 data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
241 headers = {'Content-Type': "application/json"}
242 return make_response(data, 200, headers)
243
244 @staticmethod
245 def make_csv_response(query_result):
246 headers = {'Content-Type': "text/csv; charset=UTF-8"}
247 return make_response(query_result.make_csv_content(), 200, headers)
248
249 @staticmethod
250 def make_excel_response(query_result):
251 headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
252 return make_response(query_result.make_excel_content(), 200, headers)
253
254
255 class JobResource(BaseResource):
256 def get(self, job_id):
257 """
258 Retrieve info about a running query job.
259 """
260 job = QueryTask(job_id=job_id)
261 return {'job': job.to_dict()}
262
263 def delete(self, job_id):
264 """
265 Cancel a query job in progress.
266 """
267 job = QueryTask(job_id=job_id)
268 job.cancel()
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py
--- a/redash/handlers/query_results.py
+++ b/redash/handlers/query_results.py
@@ -185,14 +185,19 @@
if query_result_id:
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
- elif query_id is not None:
+
+ if query_id is not None:
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
- if query is not None:
+ if query_result is None and query is not None:
if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:
query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)
elif query.latest_query_data_id is not None:
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)
+
+ if query is not None and query_result is not None and self.current_user.is_api_user():
+ if query.query_hash != query_result.query_hash:
+ abort(404, message='No cached result found for this query.')
if query_result:
require_access(query_result.data_source.groups, self.current_user, view_only)
@@ -229,7 +234,7 @@
self.add_cors_headers(response.headers)
if should_cache:
- response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)
+ response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR)
return response
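In short, the patch adds a guard for API-key requests (`is_api_user()`): when both a query id and a result id are supplied, the result is only served if its `query_hash` matches the query's, and the cached response is additionally marked `private`. Below is a standalone sketch of that comparison using stand-in objects rather than the real Redash models:

```python
# Stand-in objects only; the real check operates on Redash's Query and
# QueryResult models, which both carry a query_hash field.
from collections import namedtuple

Query = namedtuple("Query", ["id", "query_hash"])
QueryResult = namedtuple("QueryResult", ["id", "query_hash"])


def result_belongs_to_query(query, query_result):
    """True only if the cached result was produced by this query's text."""
    return query.query_hash == query_result.query_hash


query_a = Query(id=1, query_hash="hash-of-query-a")
result_b = QueryResult(id=99, query_hash="hash-of-query-b")
result_a = QueryResult(id=100, query_hash="hash-of-query-a")

assert not result_belongs_to_query(query_a, result_b)  # A's key cannot read B's result
assert result_belongs_to_query(query_a, result_a)      # but can still read its own
print("hash guard behaves as expected")
```
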
| {"golden_diff": "diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py\n--- a/redash/handlers/query_results.py\n+++ b/redash/handlers/query_results.py\n@@ -185,14 +185,19 @@\n \n if query_result_id:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)\n- elif query_id is not None:\n+\n+ if query_id is not None:\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n \n- if query is not None:\n+ if query_result is None and query is not None:\n if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:\n query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)\n elif query.latest_query_data_id is not None:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)\n+ \n+ if query is not None and query_result is not None and self.current_user.is_api_user():\n+ if query.query_hash != query_result.query_hash:\n+ abort(404, message='No cached result found for this query.')\n \n if query_result:\n require_access(query_result.data_source.groups, self.current_user, view_only)\n@@ -229,7 +234,7 @@\n self.add_cors_headers(response.headers)\n \n if should_cache:\n- response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)\n+ response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR)\n \n return response\n", "issue": "The API key for one query may be used to retrieve another query's results\n### Issue Summary\r\n\r\nA query's API key may be used to obtain another query's results via the REST api when including the API key in the query string.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create one query and execute it to obtain results (call it query A)\r\n2. Create another query and execute it to obtain different results (call it query B)\r\n3. Get the query's API key for query A (A_API_KEY) and the query number for query A (A_QUERY_NUMBER)\r\n4. Get the result number from query B's most recent run (B_RESULT_NUMBER)\r\n5. 
Execute the below code and you'll see that the API key for query A can get results from query B\r\n\r\n```bash\r\nREDASH_DOMAIN='yourdomain.com'\r\nA_QUERY_NUMBER='query number for query A'\r\nA_API_KEY_A='api key for query A'\r\nB_RESULT_NUMBER='query result number for query b'\r\n\r\n# this will download query B's results using query A's access key\r\nwget \\\r\n -O query_b_results.csv \\\r\n \"https://$REDASH_DOMAIN/api/queries/$A_QUERY_NUMBER/results/$B_RESULT_NUMBER.csv?api_key=$A_API_KEY\"\r\n```\r\n\r\nThis is a bug because one query's API key should NOT be able to access another query's results.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3\r\n* Browser/OS: (Command Line) / Linux Mint 18.2\r\n* How did you install Redash: Command line\r\n\n", "before_files": [{"content": "import logging\nimport json\nimport time\n\nimport pystache\nfrom flask import make_response, request\nfrom flask_login import current_user\nfrom flask_restful import abort\nfrom redash import models, settings, utils\nfrom redash.tasks import QueryTask, record_event\nfrom redash.permissions import require_permission, not_view_only, has_access, require_access, view_only\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.utils import collect_query_parameters, collect_parameters_from_request, gen_query_hash\nfrom redash.tasks.queries import enqueue_query\n\n\ndef error_response(message):\n return {'job': {'status': 4, 'error': message}}, 400\n\n\n#\n# Run a parameterized query synchronously and return the result\n# DISCLAIMER: Temporary solution to support parameters in queries. Should be\n# removed once we refactor the query results API endpoints and handling\n# on the client side. Please don't reuse in other API handlers.\n#\ndef run_query_sync(data_source, parameter_values, query_text, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n raise Exception('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age <= 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n query_hash = gen_query_hash(query_text)\n\n if query_result:\n logging.info(\"Returning cached result for query %s\" % query_hash)\n return query_result\n\n try:\n started_at = time.time()\n data, error = data_source.query_runner.run_query(query_text, current_user)\n\n if error:\n logging.info('got bak error')\n logging.info(error)\n return None\n\n run_time = time.time() - started_at\n query_result, updated_query_ids = models.QueryResult.store_result(data_source.org, data_source,\n query_hash, query_text, data,\n run_time, utils.utcnow())\n\n models.db.session.commit()\n return query_result\n except Exception as e:\n if max_age > 0:\n abort(404, message=\"Unable to get result from the database, and no cached query result found.\")\n else:\n abort(503, message=\"Unable to get result from the database.\")\n return None\n\ndef run_query(data_source, parameter_values, query_text, query_id, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n return error_response('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if data_source.paused:\n if data_source.pause_reason:\n message = '{} is 
paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)\n else:\n message = '{} is paused. Please try later.'.format(data_source.name)\n\n return error_response(message)\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age == 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n if query_result:\n return {'query_result': query_result.to_dict()}\n else:\n job = enqueue_query(query_text, data_source, current_user.id, metadata={\"Username\": current_user.email, \"Query ID\": query_id})\n return {'job': job.to_dict()}\n\n\nclass QueryResultListResource(BaseResource):\n @require_permission('execute_query')\n def post(self):\n \"\"\"\n Execute a query (or retrieve recent results).\n\n :qparam string query: The query text to execute\n :qparam number query_id: The query object to update with the result (optional)\n :qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute\n :qparam number data_source_id: ID of data source to query\n \"\"\"\n params = request.get_json(force=True)\n parameter_values = collect_parameters_from_request(request.args)\n\n query = params['query']\n max_age = int(params.get('max_age', -1))\n query_id = params.get('query_id', 'adhoc')\n\n data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)\n\n if not has_access(data_source.groups, self.current_user, not_view_only):\n return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403\n\n self.record_event({\n 'action': 'execute_query',\n 'timestamp': int(time.time()),\n 'object_id': data_source.id,\n 'object_type': 'data_source',\n 'query': query\n })\n return run_query(data_source, parameter_values, query, query_id, max_age)\n\n\nONE_YEAR = 60 * 60 * 24 * 365.25\n\n\nclass QueryResultResource(BaseResource):\n @staticmethod\n def add_cors_headers(headers):\n if 'Origin' in request.headers:\n origin = request.headers['Origin']\n\n if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:\n headers['Access-Control-Allow-Origin'] = origin\n headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()\n\n @require_permission('view_query')\n def options(self, query_id=None, query_result_id=None, filetype='json'):\n headers = {}\n self.add_cors_headers(headers)\n\n if settings.ACCESS_CONTROL_REQUEST_METHOD:\n headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD\n\n if settings.ACCESS_CONTROL_ALLOW_HEADERS:\n headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS\n\n return make_response(\"\", 200, headers)\n\n @require_permission('view_query')\n def get(self, query_id=None, query_result_id=None, filetype='json'):\n \"\"\"\n Retrieve query results.\n\n :param number query_id: The ID of the query whose results should be fetched\n :param number query_result_id: the ID of the query result to fetch\n :param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. 
Defaults to 'json'.\n\n :<json number id: Query result ID\n :<json string query: Query that produced this result\n :<json string query_hash: Hash code for query text\n :<json object data: Query output\n :<json number data_source_id: ID of data source that produced this result\n :<json number runtime: Length of execution time in seconds\n :<json string retrieved_at: Query retrieval date/time, in ISO format\n \"\"\"\n # TODO:\n # This method handles two cases: retrieving result by id & retrieving result by query id.\n # They need to be split, as they have different logic (for example, retrieving by query id\n # should check for query parameters and shouldn't cache the result).\n should_cache = query_result_id is not None\n\n parameter_values = collect_parameters_from_request(request.args)\n max_age = int(request.args.get('maxAge', 0))\n\n query_result = None\n\n if query_result_id:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)\n elif query_id is not None:\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n\n if query is not None:\n if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:\n query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)\n elif query.latest_query_data_id is not None:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)\n\n if query_result:\n require_access(query_result.data_source.groups, self.current_user, view_only)\n\n if isinstance(self.current_user, models.ApiUser):\n event = {\n 'user_id': None,\n 'org_id': self.current_org.id,\n 'action': 'api_get',\n 'timestamp': int(time.time()),\n 'api_key': self.current_user.name,\n 'file_type': filetype,\n 'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n }\n\n if query_id:\n event['object_type'] = 'query'\n event['object_id'] = query_id\n else:\n event['object_type'] = 'query_result'\n event['object_id'] = query_result_id\n\n record_event.delay(event)\n\n if filetype == 'json':\n response = self.make_json_response(query_result)\n elif filetype == 'xlsx':\n response = self.make_excel_response(query_result)\n else:\n response = self.make_csv_response(query_result)\n\n if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:\n self.add_cors_headers(response.headers)\n\n if should_cache:\n response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)\n\n return response\n\n else:\n abort(404, message='No cached result found for this query.')\n\n def make_json_response(self, query_result):\n data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)\n headers = {'Content-Type': \"application/json\"}\n return make_response(data, 200, headers)\n\n @staticmethod\n def make_csv_response(query_result):\n headers = {'Content-Type': \"text/csv; charset=UTF-8\"}\n return make_response(query_result.make_csv_content(), 200, headers)\n\n @staticmethod\n def make_excel_response(query_result):\n headers = {'Content-Type': \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n return make_response(query_result.make_excel_content(), 200, headers)\n\n\nclass JobResource(BaseResource):\n def get(self, job_id):\n \"\"\"\n Retrieve info about a running query job.\n \"\"\"\n job = QueryTask(job_id=job_id)\n return {'job': job.to_dict()}\n\n def delete(self, job_id):\n \"\"\"\n Cancel a query job in progress.\n \"\"\"\n job = QueryTask(job_id=job_id)\n 
job.cancel()\n", "path": "redash/handlers/query_results.py"}], "after_files": [{"content": "import logging\nimport json\nimport time\n\nimport pystache\nfrom flask import make_response, request\nfrom flask_login import current_user\nfrom flask_restful import abort\nfrom redash import models, settings, utils\nfrom redash.tasks import QueryTask, record_event\nfrom redash.permissions import require_permission, not_view_only, has_access, require_access, view_only\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.utils import collect_query_parameters, collect_parameters_from_request, gen_query_hash\nfrom redash.tasks.queries import enqueue_query\n\n\ndef error_response(message):\n return {'job': {'status': 4, 'error': message}}, 400\n\n\n#\n# Run a parameterized query synchronously and return the result\n# DISCLAIMER: Temporary solution to support parameters in queries. Should be\n# removed once we refactor the query results API endpoints and handling\n# on the client side. Please don't reuse in other API handlers.\n#\ndef run_query_sync(data_source, parameter_values, query_text, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n raise Exception('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age <= 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n query_hash = gen_query_hash(query_text)\n\n if query_result:\n logging.info(\"Returning cached result for query %s\" % query_hash)\n return query_result\n\n try:\n started_at = time.time()\n data, error = data_source.query_runner.run_query(query_text, current_user)\n\n if error:\n logging.info('got bak error')\n logging.info(error)\n return None\n\n run_time = time.time() - started_at\n query_result, updated_query_ids = models.QueryResult.store_result(data_source.org, data_source,\n query_hash, query_text, data,\n run_time, utils.utcnow())\n\n models.db.session.commit()\n return query_result\n except Exception as e:\n if max_age > 0:\n abort(404, message=\"Unable to get result from the database, and no cached query result found.\")\n else:\n abort(503, message=\"Unable to get result from the database.\")\n return None\n\ndef run_query(data_source, parameter_values, query_text, query_id, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n return error_response('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if data_source.paused:\n if data_source.pause_reason:\n message = '{} is paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)\n else:\n message = '{} is paused. 
Please try later.'.format(data_source.name)\n\n return error_response(message)\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age == 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n if query_result:\n return {'query_result': query_result.to_dict()}\n else:\n job = enqueue_query(query_text, data_source, current_user.id, metadata={\"Username\": current_user.email, \"Query ID\": query_id})\n return {'job': job.to_dict()}\n\n\nclass QueryResultListResource(BaseResource):\n @require_permission('execute_query')\n def post(self):\n \"\"\"\n Execute a query (or retrieve recent results).\n\n :qparam string query: The query text to execute\n :qparam number query_id: The query object to update with the result (optional)\n :qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute\n :qparam number data_source_id: ID of data source to query\n \"\"\"\n params = request.get_json(force=True)\n parameter_values = collect_parameters_from_request(request.args)\n\n query = params['query']\n max_age = int(params.get('max_age', -1))\n query_id = params.get('query_id', 'adhoc')\n\n data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)\n\n if not has_access(data_source.groups, self.current_user, not_view_only):\n return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403\n\n self.record_event({\n 'action': 'execute_query',\n 'timestamp': int(time.time()),\n 'object_id': data_source.id,\n 'object_type': 'data_source',\n 'query': query\n })\n return run_query(data_source, parameter_values, query, query_id, max_age)\n\n\nONE_YEAR = 60 * 60 * 24 * 365.25\n\n\nclass QueryResultResource(BaseResource):\n @staticmethod\n def add_cors_headers(headers):\n if 'Origin' in request.headers:\n origin = request.headers['Origin']\n\n if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:\n headers['Access-Control-Allow-Origin'] = origin\n headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()\n\n @require_permission('view_query')\n def options(self, query_id=None, query_result_id=None, filetype='json'):\n headers = {}\n self.add_cors_headers(headers)\n\n if settings.ACCESS_CONTROL_REQUEST_METHOD:\n headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD\n\n if settings.ACCESS_CONTROL_ALLOW_HEADERS:\n headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS\n\n return make_response(\"\", 200, headers)\n\n @require_permission('view_query')\n def get(self, query_id=None, query_result_id=None, filetype='json'):\n \"\"\"\n Retrieve query results.\n\n :param number query_id: The ID of the query whose results should be fetched\n :param number query_result_id: the ID of the query result to fetch\n :param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. 
Defaults to 'json'.\n\n :<json number id: Query result ID\n :<json string query: Query that produced this result\n :<json string query_hash: Hash code for query text\n :<json object data: Query output\n :<json number data_source_id: ID of data source that produced this result\n :<json number runtime: Length of execution time in seconds\n :<json string retrieved_at: Query retrieval date/time, in ISO format\n \"\"\"\n # TODO:\n # This method handles two cases: retrieving result by id & retrieving result by query id.\n # They need to be split, as they have different logic (for example, retrieving by query id\n # should check for query parameters and shouldn't cache the result).\n should_cache = query_result_id is not None\n\n parameter_values = collect_parameters_from_request(request.args)\n max_age = int(request.args.get('maxAge', 0))\n\n query_result = None\n\n if query_result_id:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)\n\n if query_id is not None:\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n\n if query_result is None and query is not None:\n if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:\n query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)\n elif query.latest_query_data_id is not None:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)\n \n if query is not None and query_result is not None and self.current_user.is_api_user():\n if query.query_hash != query_result.query_hash:\n abort(404, message='No cached result found for this query.')\n\n if query_result:\n require_access(query_result.data_source.groups, self.current_user, view_only)\n\n if isinstance(self.current_user, models.ApiUser):\n event = {\n 'user_id': None,\n 'org_id': self.current_org.id,\n 'action': 'api_get',\n 'timestamp': int(time.time()),\n 'api_key': self.current_user.name,\n 'file_type': filetype,\n 'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n }\n\n if query_id:\n event['object_type'] = 'query'\n event['object_id'] = query_id\n else:\n event['object_type'] = 'query_result'\n event['object_id'] = query_result_id\n\n record_event.delay(event)\n\n if filetype == 'json':\n response = self.make_json_response(query_result)\n elif filetype == 'xlsx':\n response = self.make_excel_response(query_result)\n else:\n response = self.make_csv_response(query_result)\n\n if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:\n self.add_cors_headers(response.headers)\n\n if should_cache:\n response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR)\n\n return response\n\n else:\n abort(404, message='No cached result found for this query.')\n\n def make_json_response(self, query_result):\n data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)\n headers = {'Content-Type': \"application/json\"}\n return make_response(data, 200, headers)\n\n @staticmethod\n def make_csv_response(query_result):\n headers = {'Content-Type': \"text/csv; charset=UTF-8\"}\n return make_response(query_result.make_csv_content(), 200, headers)\n\n @staticmethod\n def make_excel_response(query_result):\n headers = {'Content-Type': \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n return make_response(query_result.make_excel_content(), 200, headers)\n\n\nclass JobResource(BaseResource):\n def get(self, job_id):\n \"\"\"\n 
Retrieve info about a running query job.\n \"\"\"\n job = QueryTask(job_id=job_id)\n return {'job': job.to_dict()}\n\n def delete(self, job_id):\n \"\"\"\n Cancel a query job in progress.\n \"\"\"\n job = QueryTask(job_id=job_id)\n job.cancel()\n", "path": "redash/handlers/query_results.py"}]} | 3,595 | 391 |
gh_patches_debug_12685 | rasdani/github-patches | git_diff | sopel-irc__sopel-985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in url setup procedure - Bot no longer posts titles of links
Hello. I've actually had this error for a while and tried fixing it a while back to no avail, so I'm hoping someone here could help. The bot no longer seems to show titles of links posted in the channels, and when starting the bot I get:
```
Error in url setup procedure: nothing to repeat (../../../../../lib/python2.7/sre_parse.py:517)
```
Which looking at that file points to this:
```
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
```
So I'm not sure what to do here.
--- END ISSUE ---
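The traceback points into `re`'s parser, and the accepted one-line type change further down suggests the `exclude` setting was being read back as a single plain string instead of a list of patterns: iterating a string yields single characters, and compiling a bare quantifier such as `?` or `*` raises exactly this error. A minimal sketch of that failure mode, with a hypothetical exclude value:

```python
# Hypothetical exclude value; any URL containing "?", "*" or "+" triggers it.
import re

exclude_setting = "http://example.com/?id=.*"    # stored as ONE string

try:
    # What setup() effectively does when the setting is a string:
    [re.compile(s) for s in exclude_setting]     # iterates character by character
except re.error as exc:
    print("Error in url setup procedure:", exc)  # -> nothing to repeat

# With a list of patterns, each element is compiled as a whole expression:
patterns = [re.compile(s) for s in [exclude_setting]]
print("compiled", len(patterns), "exclude pattern(s)")
```
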
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/url.py`
Content:
```
1 # coding=utf-8
2 """URL title module"""
3 # Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham
4 # Copyright 2012-2013 Elsie Powell
5 # Copyright 2013 Lior Ramati ([email protected])
6 # Copyright © 2014 Elad Alfassa <[email protected]>
7 # Licensed under the Eiffel Forum License 2.
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 import re
11 from sopel import web, tools
12 from sopel.module import commands, rule, example
13 from sopel.config.types import ValidatedAttribute, StaticSection
14
15
16 url_finder = None
17 # These are used to clean up the title tag before actually parsing it. Not the
18 # world's best way to do this, but it'll do for now.
19 title_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)
20 quoted_title = re.compile('[\'"]<title>[\'"]', re.IGNORECASE)
21 # This is another regex that presumably does something important.
22 re_dcc = re.compile(r'(?i)dcc\ssend')
23 # This sets the maximum number of bytes that should be read in order to find
24 # the title. We don't want it too high, or a link to a big file/stream will
25 # just keep downloading until there's no more memory. 640k ought to be enough
26 # for anybody.
27 max_bytes = 655360
28
29
30 class UrlSection(StaticSection):
31 # TODO some validation rules maybe?
32 exclude = ValidatedAttribute('exclude')
33 exclusion_char = ValidatedAttribute('exclusion_char', default='!')
34
35
36 def configure(config):
37 config.define_section('url', UrlSection)
38 config.url.configure_setting(
39 'exclude',
40 'Enter regular expressions for each URL you would like to exclude.'
41 )
42 config.url.configure_setting(
43 'exclusion_char',
44 'Enter a character which can be prefixed to suppress URL titling'
45 )
46
47
48 def setup(bot=None):
49 global url_finder
50
51 # TODO figure out why this is needed, and get rid of it, because really?
52 if not bot:
53 return
54 bot.config.define_section('url', UrlSection)
55
56 if bot.config.url.exclude:
57 regexes = [re.compile(s) for s in bot.config.url.exclude]
58 else:
59 regexes = []
60
61 # We're keeping these in their own list, rather than putting then in the
62 # callbacks list because 1, it's easier to deal with modules that are still
63 # using this list, and not the newer callbacks list and 2, having a lambda
64 # just to pass is kinda ugly.
65 if not bot.memory.contains('url_exclude'):
66 bot.memory['url_exclude'] = regexes
67 else:
68 exclude = bot.memory['url_exclude']
69 if regexes:
70 exclude.extend(regexes)
71 bot.memory['url_exclude'] = exclude
72
73 # Ensure that url_callbacks and last_seen_url are in memory
74 if not bot.memory.contains('url_callbacks'):
75 bot.memory['url_callbacks'] = tools.SopelMemory()
76 if not bot.memory.contains('last_seen_url'):
77 bot.memory['last_seen_url'] = tools.SopelMemory()
78
79 url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\S+))' %
80 (bot.config.url.exclusion_char))
81
82
83 @commands('title')
84 @example('.title http://google.com', '[ Google ] - google.com')
85 def title_command(bot, trigger):
86 """
87 Show the title or URL information for the given URL, or the last URL seen
88 in this channel.
89 """
90 if not trigger.group(2):
91 if trigger.sender not in bot.memory['last_seen_url']:
92 return
93 matched = check_callbacks(bot, trigger,
94 bot.memory['last_seen_url'][trigger.sender],
95 True)
96 if matched:
97 return
98 else:
99 urls = [bot.memory['last_seen_url'][trigger.sender]]
100 else:
101 urls = re.findall(url_finder, trigger)
102
103 results = process_urls(bot, trigger, urls)
104 for title, domain in results[:4]:
105 bot.reply('[ %s ] - %s' % (title, domain))
106
107
108 @rule('(?u).*(https?://\S+).*')
109 def title_auto(bot, trigger):
110 """
111 Automatically show titles for URLs. For shortened URLs/redirects, find
112 where the URL redirects to and show the title for that (or call a function
113 from another module to give more information).
114 """
115 if re.match(bot.config.core.prefix + 'title', trigger):
116 return
117
118 # Avoid fetching known malicious links
119 if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:
120 if bot.memory['safety_cache'][trigger]['positives'] > 1:
121 return
122
123 urls = re.findall(url_finder, trigger)
124 results = process_urls(bot, trigger, urls)
125 bot.memory['last_seen_url'][trigger.sender] = urls[-1]
126
127 for title, domain in results[:4]:
128 message = '[ %s ] - %s' % (title, domain)
129 # Guard against responding to other instances of this bot.
130 if message != trigger:
131 bot.say(message)
132
133
134 def process_urls(bot, trigger, urls):
135 """
136 For each URL in the list, ensure that it isn't handled by another module.
137 If not, find where it redirects to, if anywhere. If that redirected URL
138 should be handled by another module, dispatch the callback for it.
139 Return a list of (title, hostname) tuples for each URL which is not handled by
140 another module.
141 """
142
143 results = []
144 for url in urls:
145 if not url.startswith(bot.config.url.exclusion_char):
146 # Magic stuff to account for international domain names
147 try:
148 url = web.iri_to_uri(url)
149 except:
150 pass
151 # First, check that the URL we got doesn't match
152 matched = check_callbacks(bot, trigger, url, False)
153 if matched:
154 continue
155 # Then see if it redirects anywhere
156 new_url = follow_redirects(url)
157 if not new_url:
158 continue
159 # Then see if the final URL matches anything
160 matched = check_callbacks(bot, trigger, new_url, new_url != url)
161 if matched:
162 continue
163 # Finally, actually show the URL
164 title = find_title(url)
165 if title:
166 results.append((title, get_hostname(url)))
167 return results
168
169
170 def follow_redirects(url):
171 """
172 Follow HTTP 3xx redirects, and return the actual URL. Return None if
173 there's a problem.
174 """
175 try:
176 connection = web.get_urllib_object(url, 60)
177 url = connection.geturl() or url
178 connection.close()
179 except:
180 return None
181 return url
182
183
184 def check_callbacks(bot, trigger, url, run=True):
185 """
186 Check the given URL against the callbacks list. If it matches, and ``run``
187 is given as ``True``, run the callback function, otherwise pass. Returns
188 ``True`` if the url matched anything in the callbacks list.
189 """
190 # Check if it matches the exclusion list first
191 matched = any(regex.search(url) for regex in bot.memory['url_exclude'])
192 # Then, check if there's anything in the callback list
193 for regex, function in tools.iteritems(bot.memory['url_callbacks']):
194 match = regex.search(url)
195 if match:
196 if run:
197 function(bot, trigger, match)
198 matched = True
199 return matched
200
201
202 def find_title(url):
203 """Return the title for the given URL."""
204 try:
205 content, headers = web.get(url, return_headers=True, limit_bytes=max_bytes)
206 except UnicodeDecodeError:
207 return # Fail silently when data can't be decoded
208
209 # Some cleanup that I don't really grok, but was in the original, so
210 # we'll keep it (with the compiled regexes made global) for now.
211 content = title_tag_data.sub(r'<\1title>', content)
212 content = quoted_title.sub('', content)
213
214 start = content.find('<title>')
215 end = content.find('</title>')
216 if start == -1 or end == -1:
217 return
218 title = web.decode(content[start + 7:end])
219 title = title.strip()[:200]
220
221 title = ' '.join(title.split()) # cleanly remove multiple spaces
222
223 # More cryptic regex substitutions. This one looks to be myano's invention.
224 title = re_dcc.sub('', title)
225
226 return title or None
227
228
229 def get_hostname(url):
230 idx = 7
231 if url.startswith('https://'):
232 idx = 8
233 elif url.startswith('ftp://'):
234 idx = 6
235 hostname = url[idx:]
236 slash = hostname.find('/')
237 if slash != -1:
238 hostname = hostname[:slash]
239 return hostname
240
241 if __name__ == "__main__":
242 from sopel.test_tools import run_example_tests
243 run_example_tests(__file__)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/url.py b/sopel/modules/url.py
--- a/sopel/modules/url.py
+++ b/sopel/modules/url.py
@@ -10,7 +10,7 @@
import re
from sopel import web, tools
from sopel.module import commands, rule, example
-from sopel.config.types import ValidatedAttribute, StaticSection
+from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection
url_finder = None
@@ -29,7 +29,7 @@
class UrlSection(StaticSection):
# TODO some validation rules maybe?
- exclude = ValidatedAttribute('exclude')
+ exclude = ListAttribute('exclude')
exclusion_char = ValidatedAttribute('exclusion_char', default='!')
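Why the one-line type change is enough: `ListAttribute` hands `setup()` a real list of pattern strings, so each element is compiled as a whole regular expression, whereas the previous string value was iterated character by character. The snippet below is illustrative only, not part of the actual patch; it just makes the distinction concrete by tolerating both shapes:

```python
# Illustrative only; the accepted fix simply switches the setting type so
# bot.config.url.exclude is already a list when setup() runs.
import re

def compile_excludes(exclude_setting):
    """Compile exclude patterns, tolerating a legacy single-string value."""
    if isinstance(exclude_setting, str):
        exclude_setting = [exclude_setting]
    return [re.compile(s) for s in (exclude_setting or [])]

print(compile_excludes("http://example.com/?id=.*")[0].pattern)
print([r.pattern for r in compile_excludes(["foo.*", "bar[0-9]+"])])
```
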
| {"golden_diff": "diff --git a/sopel/modules/url.py b/sopel/modules/url.py\n--- a/sopel/modules/url.py\n+++ b/sopel/modules/url.py\n@@ -10,7 +10,7 @@\n import re\n from sopel import web, tools\n from sopel.module import commands, rule, example\n-from sopel.config.types import ValidatedAttribute, StaticSection\n+from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n \n \n url_finder = None\n@@ -29,7 +29,7 @@\n \n class UrlSection(StaticSection):\n # TODO some validation rules maybe?\n- exclude = ValidatedAttribute('exclude')\n+ exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n", "issue": "Error in url setup procedure - Bot no longer posts titles of links\nHello. I've actually had this error for awhile and tried fixing it awhile back to no avail so I'm hoping someone here could help. The bot no longer seems to show titles of links posted in the channels and when starting the bot I get:\n\n```\nError in url setup procedure: nothing to repeat (../../../../../lib/python2.7/sre_parse.py:517)\n```\n\nWhich looking at that file points to this:\n\n```\n if not item or (_len(item) == 1 and item[0][0] == AT):\n raise error, \"nothing to repeat\"\n```\n\nSo I'm not sure what to do here.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, StaticSection\n\n\nurl_finder = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 
640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ValidatedAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n\n\ndef setup(bot=None):\n global url_finder\n\n # TODO figure out why this is needed, and get rid of it, because really?\n if not bot:\n return\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\\S+))' %\n (bot.config.url.exclusion_char))\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = re.findall(url_finder, trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n\n\n@rule('(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = re.findall(url_finder, trigger)\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. 
If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except:\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # Then see if it redirects anywhere\n new_url = follow_redirects(url)\n if not new_url:\n continue\n # Then see if the final URL matches anything\n matched = check_callbacks(bot, trigger, new_url, new_url != url)\n if matched:\n continue\n # Finally, actually show the URL\n title = find_title(url)\n if title:\n results.append((title, get_hostname(url)))\n return results\n\n\ndef follow_redirects(url):\n \"\"\"\n Follow HTTP 3xx redirects, and return the actual URL. Return None if\n there's a problem.\n \"\"\"\n try:\n connection = web.get_urllib_object(url, 60)\n url = connection.geturl() or url\n connection.close()\n except:\n return None\n return url\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n if run:\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n content, headers = web.get(url, return_headers=True, limit_bytes=max_bytes)\n except UnicodeDecodeError:\n return # Fail silently when data can't be decoded\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.find('<title>')\n end = content.find('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. 
This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n\n\nurl_finder = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n\n\ndef setup(bot=None):\n global url_finder\n\n # TODO figure out why this is needed, and get rid of it, because really?\n if not bot:\n return\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\\S+))' %\n (bot.config.url.exclusion_char))\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL 
information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = re.findall(url_finder, trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n\n\n@rule('(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = re.findall(url_finder, trigger)\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except:\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # Then see if it redirects anywhere\n new_url = follow_redirects(url)\n if not new_url:\n continue\n # Then see if the final URL matches anything\n matched = check_callbacks(bot, trigger, new_url, new_url != url)\n if matched:\n continue\n # Finally, actually show the URL\n title = find_title(url)\n if title:\n results.append((title, get_hostname(url)))\n return results\n\n\ndef follow_redirects(url):\n \"\"\"\n Follow HTTP 3xx redirects, and return the actual URL. Return None if\n there's a problem.\n \"\"\"\n try:\n connection = web.get_urllib_object(url, 60)\n url = connection.geturl() or url\n connection.close()\n except:\n return None\n return url\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. 
Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n if run:\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n content, headers = web.get(url, return_headers=True, limit_bytes=max_bytes)\n except UnicodeDecodeError:\n return # Fail silently when data can't be decoded\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.find('<title>')\n end = content.find('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}]} | 3,034 | 168 |
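The record ending here patches Sopel's `url.py` title fetcher, whose `find_title` reads a bounded chunk of HTML and pulls out the `<title>` tag with a regex. For orientation, a minimal stdlib-only sketch of that same bounded-fetch-and-extract idea; it deliberately avoids Sopel's own `web` helpers, so the function name and byte cap below are illustrative rather than part of the module.

```python
import re
from typing import Optional
from urllib.request import urlopen

MAX_BYTES = 655360  # cap of the same order the module uses, so large files are not slurped


def fetch_title(url: str, timeout: float = 10.0) -> Optional[str]:
    """Read at most MAX_BYTES of a page and return a cleaned <title>, or None."""
    with urlopen(url, timeout=timeout) as resp:  # illustrative only; no redirect handling
        html = resp.read(MAX_BYTES).decode("utf-8", errors="replace")
    match = re.search(r"<title[^>]*>(.*?)</title>", html, re.IGNORECASE | re.DOTALL)
    if not match:
        return None
    # Collapse whitespace runs and cap the length, mirroring the module's cleanup.
    return " ".join(match.group(1).split())[:200] or None
```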
gh_patches_debug_7367 | rasdani/github-patches | git_diff | holoviz__holoviews-5450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty Box-Whisker plot with the Matplotlib backend when vdims has NaNs
The following snippet correctly returns a Box-Whisker plot grouped by *carrier* with the Bokeh backend but just returns an empty plot with the matplotlib backend.
```python
import holoviews as hv
from hvplot.sample_data import us_crime, airline_flights
flights = airline_flights.to_dask().persist()
flight_subset = flights[flights.carrier.isin(['AA', 'US', 'OH'])]
ds = hv.Dataset(flight_subset, kdims=['carrier'], vdims=['depdelay'])
hv.BoxWhisker(ds)
```

Note that this has nothing to do with Dask, since adding `flight_subset = flight_subset.compute()` has no effect.
I've done some exploration and found out that the `depdelay` column has `NaN` values; replacing them with `0` (or any float) fixes the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holoviews/plotting/mpl/stats.py`
Content:
```
1 import param
2 import numpy as np
3
4 from ...core.ndmapping import sorted_context
5 from .chart import AreaPlot, ChartPlot
6 from .path import PolygonPlot
7 from .plot import AdjoinedPlot
8
9
10 class DistributionPlot(AreaPlot):
11 """
12 DistributionPlot visualizes a distribution of values as a KDE.
13 """
14
15 bandwidth = param.Number(default=None, doc="""
16 The bandwidth of the kernel for the density estimate.""")
17
18 cut = param.Number(default=3, doc="""
19 Draw the estimate to cut * bw from the extreme data points.""")
20
21 filled = param.Boolean(default=True, doc="""
22 Whether the bivariate contours should be filled.""")
23
24
25 class BivariatePlot(PolygonPlot):
26 """
27 Bivariate plot visualizes two-dimensional kernel density
28 estimates. Additionally, by enabling the joint option, the
29 marginals distributions can be plotted alongside each axis (does
30 not animate or compose).
31 """
32
33 bandwidth = param.Number(default=None, doc="""
34 The bandwidth of the kernel for the density estimate.""")
35
36 cut = param.Number(default=3, doc="""
37 Draw the estimate to cut * bw from the extreme data points.""")
38
39 filled = param.Boolean(default=False, doc="""
40 Whether the bivariate contours should be filled.""")
41
42 levels = param.ClassSelector(default=10, class_=(list, int), doc="""
43 A list of scalar values used to specify the contour levels.""")
44
45
46 class BoxPlot(ChartPlot):
47 """
48 BoxPlot plots the ErrorBar Element type and supporting
49 both horizontal and vertical error bars via the 'horizontal'
50 plot option.
51 """
52
53 style_opts = ['notch', 'sym', 'whis', 'bootstrap',
54 'conf_intervals', 'widths', 'showmeans',
55 'show_caps', 'showfliers', 'boxprops',
56 'whiskerprops', 'capprops', 'flierprops',
57 'medianprops', 'meanprops', 'meanline']
58
59 _nonvectorized_styles = style_opts
60
61 _plot_methods = dict(single='boxplot')
62
63 def get_extents(self, element, ranges, range_type='combined'):
64 return super().get_extents(
65 element, ranges, range_type, 'categorical', element.vdims[0]
66 )
67
68 def get_data(self, element, ranges, style):
69 if element.kdims:
70 with sorted_context(False):
71 groups = element.groupby(element.kdims).data.items()
72 else:
73 groups = [(element.label, element)]
74
75 data, labels = [], []
76 for key, group in groups:
77 if element.kdims:
78 label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
79 else:
80 label = key
81 data.append(group[group.vdims[0]])
82 labels.append(label)
83 style['labels'] = labels
84 style = {k: v for k, v in style.items()
85 if k not in ['zorder', 'label']}
86 style['vert'] = not self.invert_axes
87 format_kdims = [kd.clone(value_format=None) for kd in element.kdims]
88 return (data,), style, {'dimensions': [format_kdims, element.vdims[0]]}
89
90 def init_artists(self, ax, plot_args, plot_kwargs):
91 artists = ax.boxplot(*plot_args, **plot_kwargs)
92 artists['artist'] = artists['boxes']
93 return artists
94
95 def teardown_handles(self):
96 for g in ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means'):
97 for v in self.handles.get(g, []):
98 v.remove()
99
100
101 class SideBoxPlot(AdjoinedPlot, BoxPlot):
102
103 bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
104 Make plot background invisible.""")
105
106 border_size = param.Number(default=0, doc="""
107 The size of the border expressed as a fraction of the main plot.""")
108
109 xaxis = param.ObjectSelector(default='bare',
110 objects=['top', 'bottom', 'bare', 'top-bare',
111 'bottom-bare', None], doc="""
112 Whether and where to display the xaxis, bare options allow suppressing
113 all axis labels including ticks and xlabel. Valid options are 'top',
114 'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
115
116 yaxis = param.ObjectSelector(default='bare',
117 objects=['left', 'right', 'bare', 'left-bare',
118 'right-bare', None], doc="""
119 Whether and where to display the yaxis, bare options allow suppressing
120 all axis labels including ticks and ylabel. Valid options are 'left',
121 'right', 'bare' 'left-bare' and 'right-bare'.""")
122
123 def __init__(self, *args, **kwargs):
124 super().__init__(*args, **kwargs)
125 if self.adjoined:
126 self.invert_axes = not self.invert_axes
127
128
129 class ViolinPlot(BoxPlot):
130 """
131 BoxPlot plots the ErrorBar Element type and supporting
132 both horizontal and vertical error bars via the 'horizontal'
133 plot option.
134 """
135
136 bandwidth = param.Number(default=None, doc="""
137 Allows supplying explicit bandwidth value rather than relying
138 on scott or silverman method.""")
139
140 inner = param.ObjectSelector(objects=['box', 'medians', None],
141 default='box', doc="""
142 Inner visual indicator for distribution values:
143
144 * box - A small box plot
145 * stick - Lines indicating each sample value
146 * quartiles - Indicates first, second and third quartiles
147 """)
148
149 _plot_methods = dict(single='violinplot')
150
151 style_opts = ['showmeans', 'facecolors', 'showextrema', 'bw_method',
152 'widths', 'stats_color', 'box_color', 'alpha', 'edgecolors']
153
154 _nonvectorized_styles = [
155 s for s in style_opts if s not in ('facecolors', 'edgecolors', 'widths')
156 ]
157
158 def init_artists(self, ax, plot_args, plot_kwargs):
159 box_color = plot_kwargs.pop('box_color', 'black')
160 stats_color = plot_kwargs.pop('stats_color', 'black')
161 facecolors = plot_kwargs.pop('facecolors', [])
162 edgecolors = plot_kwargs.pop('edgecolors', 'black')
163 labels = plot_kwargs.pop('labels')
164 alpha = plot_kwargs.pop('alpha', 1.)
165 showmedians = self.inner == 'medians'
166 bw_method = self.bandwidth or 'scott'
167 artists = ax.violinplot(*plot_args, bw_method=bw_method,
168 showmedians=showmedians, **plot_kwargs)
169 if self.inner == 'box':
170 box = ax.boxplot(*plot_args, positions=plot_kwargs['positions'],
171 showfliers=False, showcaps=False, patch_artist=True,
172 boxprops={'facecolor': box_color},
173 medianprops={'color': 'white'}, widths=0.1,
174 labels=labels)
175 artists.update(box)
176 for body, color in zip(artists['bodies'], facecolors):
177 body.set_facecolors(color)
178 body.set_edgecolors(edgecolors)
179 body.set_alpha(alpha)
180 for stat in ['cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars']:
181 if stat in artists:
182 artists[stat].set_edgecolors(stats_color)
183 artists['bodies'] = artists['bodies']
184 return artists
185
186 def get_data(self, element, ranges, style):
187 if element.kdims:
188 with sorted_context(False):
189 groups = element.groupby(element.kdims).data.items()
190 else:
191 groups = [(element.label, element)]
192
193 data, labels, colors = [], [], []
194 elstyle = self.lookup_options(element, 'style')
195 for i, (key, group) in enumerate(groups):
196 if element.kdims:
197 label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
198 else:
199 label = key
200 d = group[group.vdims[0]]
201 data.append(d[np.isfinite(d)])
202 labels.append(label)
203 colors.append(elstyle[i].get('facecolors', 'blue'))
204 style['positions'] = list(range(len(data)))
205 style['labels'] = labels
206 style['facecolors'] = colors
207
208 if element.ndims > 0:
209 element = element.aggregate(function=np.mean)
210 else:
211 element = element.clone([(element.aggregate(function=np.mean),)])
212
213 new_style = self._apply_transforms(element, ranges, style)
214 style = {k: v for k, v in new_style.items()
215 if k not in ['zorder', 'label']}
216 style['vert'] = not self.invert_axes
217 format_kdims = [kd.clone(value_format=None) for kd in element.kdims]
218 ticks = {'yticks' if self.invert_axes else 'xticks': list(enumerate(labels))}
219 return (data,), style, dict(dimensions=[format_kdims, element.vdims[0]], **ticks)
220
221 def teardown_handles(self):
222 box_artists = ('cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars', 'bodies')
223 violin_artists = ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means')
224 for group in box_artists+violin_artists:
225 for v in self.handles.get(group, []):
226 v.remove()
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holoviews/plotting/mpl/stats.py b/holoviews/plotting/mpl/stats.py
--- a/holoviews/plotting/mpl/stats.py
+++ b/holoviews/plotting/mpl/stats.py
@@ -78,7 +78,8 @@
label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
else:
label = key
- data.append(group[group.vdims[0]])
+ d = group[group.vdims[0]]
+ data.append(d[np.isfinite(d)])
labels.append(label)
style['labels'] = labels
style = {k: v for k, v in style.items()
| {"golden_diff": "diff --git a/holoviews/plotting/mpl/stats.py b/holoviews/plotting/mpl/stats.py\n--- a/holoviews/plotting/mpl/stats.py\n+++ b/holoviews/plotting/mpl/stats.py\n@@ -78,7 +78,8 @@\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n- data.append(group[group.vdims[0]])\n+ d = group[group.vdims[0]]\n+ data.append(d[np.isfinite(d)])\n labels.append(label)\n style['labels'] = labels\n style = {k: v for k, v in style.items()\n", "issue": "Empty Box-Whisker plot with the Matplotlib backend when vdims has NaNs\nThe following snippet correctly returns a Box-Whisker plot grouped by *carrier* with the Bokeh backend but just returns an empty plot with the matplotlib backend.\r\n\r\n```python\r\nimport holoviews as hv\r\nfrom hvplot.sample_data import us_crime, airline_flights\r\n\r\nflights = airline_flights.to_dask().persist()\r\nflight_subset = flights[flights.carrier.isin(['AA', 'US', 'OH'])]\r\n\r\nds = hv.Dataset(flight_subset, kdims=['carrier'], vdims=['depdelay'])\r\nhv.BoxWhisker(ds)\r\n```\r\n\r\n\r\nNote that this has nothing to do with Dask, since adding `flight_subset = flight_subset.compute()` has no effect.\r\n\r\nI've done some exploration and found out that the `depdelay` column has `NaN`s values, replacing them with `0` (or any float) fixes the issue. \n", "before_files": [{"content": "import param\nimport numpy as np\n\nfrom ...core.ndmapping import sorted_context\nfrom .chart import AreaPlot, ChartPlot\nfrom .path import PolygonPlot\nfrom .plot import AdjoinedPlot\n\n\nclass DistributionPlot(AreaPlot):\n \"\"\"\n DistributionPlot visualizes a distribution of values as a KDE.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=True, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n\nclass BivariatePlot(PolygonPlot):\n \"\"\"\n Bivariate plot visualizes two-dimensional kernel density\n estimates. 
Additionally, by enabling the joint option, the\n marginals distributions can be plotted alongside each axis (does\n not animate or compose).\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n\nclass BoxPlot(ChartPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n style_opts = ['notch', 'sym', 'whis', 'bootstrap',\n 'conf_intervals', 'widths', 'showmeans',\n 'show_caps', 'showfliers', 'boxprops',\n 'whiskerprops', 'capprops', 'flierprops',\n 'medianprops', 'meanprops', 'meanline']\n\n _nonvectorized_styles = style_opts\n\n _plot_methods = dict(single='boxplot')\n\n def get_extents(self, element, ranges, range_type='combined'):\n return super().get_extents(\n element, ranges, range_type, 'categorical', element.vdims[0]\n )\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels = [], []\n for key, group in groups:\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n data.append(group[group.vdims[0]])\n labels.append(label)\n style['labels'] = labels\n style = {k: v for k, v in style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n return (data,), style, {'dimensions': [format_kdims, element.vdims[0]]}\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n artists = ax.boxplot(*plot_args, **plot_kwargs)\n artists['artist'] = artists['boxes']\n return artists\n\n def teardown_handles(self):\n for g in ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means'):\n for v in self.handles.get(g, []):\n v.remove()\n\n\nclass SideBoxPlot(AdjoinedPlot, BoxPlot):\n\n bgcolor = param.Parameter(default=(1, 1, 1, 0), doc=\"\"\"\n Make plot background invisible.\"\"\")\n\n border_size = param.Number(default=0, doc=\"\"\"\n The size of the border expressed as a fraction of the main plot.\"\"\")\n\n xaxis = param.ObjectSelector(default='bare',\n objects=['top', 'bottom', 'bare', 'top-bare',\n 'bottom-bare', None], doc=\"\"\"\n Whether and where to display the xaxis, bare options allow suppressing\n all axis labels including ticks and xlabel. Valid options are 'top',\n 'bottom', 'bare', 'top-bare' and 'bottom-bare'.\"\"\")\n\n yaxis = param.ObjectSelector(default='bare',\n objects=['left', 'right', 'bare', 'left-bare',\n 'right-bare', None], doc=\"\"\"\n Whether and where to display the yaxis, bare options allow suppressing\n all axis labels including ticks and ylabel. 
Valid options are 'left',\n 'right', 'bare' 'left-bare' and 'right-bare'.\"\"\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.adjoined:\n self.invert_axes = not self.invert_axes\n\n\nclass ViolinPlot(BoxPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n Allows supplying explicit bandwidth value rather than relying\n on scott or silverman method.\"\"\")\n\n inner = param.ObjectSelector(objects=['box', 'medians', None],\n default='box', doc=\"\"\"\n Inner visual indicator for distribution values:\n\n * box - A small box plot\n * stick - Lines indicating each sample value\n * quartiles - Indicates first, second and third quartiles\n \"\"\")\n\n _plot_methods = dict(single='violinplot')\n\n style_opts = ['showmeans', 'facecolors', 'showextrema', 'bw_method',\n 'widths', 'stats_color', 'box_color', 'alpha', 'edgecolors']\n\n _nonvectorized_styles = [\n s for s in style_opts if s not in ('facecolors', 'edgecolors', 'widths')\n ]\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n box_color = plot_kwargs.pop('box_color', 'black')\n stats_color = plot_kwargs.pop('stats_color', 'black')\n facecolors = plot_kwargs.pop('facecolors', [])\n edgecolors = plot_kwargs.pop('edgecolors', 'black')\n labels = plot_kwargs.pop('labels')\n alpha = plot_kwargs.pop('alpha', 1.)\n showmedians = self.inner == 'medians'\n bw_method = self.bandwidth or 'scott'\n artists = ax.violinplot(*plot_args, bw_method=bw_method,\n showmedians=showmedians, **plot_kwargs)\n if self.inner == 'box':\n box = ax.boxplot(*plot_args, positions=plot_kwargs['positions'],\n showfliers=False, showcaps=False, patch_artist=True,\n boxprops={'facecolor': box_color},\n medianprops={'color': 'white'}, widths=0.1,\n labels=labels)\n artists.update(box)\n for body, color in zip(artists['bodies'], facecolors):\n body.set_facecolors(color)\n body.set_edgecolors(edgecolors)\n body.set_alpha(alpha)\n for stat in ['cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars']:\n if stat in artists:\n artists[stat].set_edgecolors(stats_color)\n artists['bodies'] = artists['bodies']\n return artists\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels, colors = [], [], []\n elstyle = self.lookup_options(element, 'style')\n for i, (key, group) in enumerate(groups):\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n d = group[group.vdims[0]]\n data.append(d[np.isfinite(d)])\n labels.append(label)\n colors.append(elstyle[i].get('facecolors', 'blue'))\n style['positions'] = list(range(len(data)))\n style['labels'] = labels\n style['facecolors'] = colors\n\n if element.ndims > 0:\n element = element.aggregate(function=np.mean)\n else:\n element = element.clone([(element.aggregate(function=np.mean),)])\n\n new_style = self._apply_transforms(element, ranges, style)\n style = {k: v for k, v in new_style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n ticks = {'yticks' if self.invert_axes else 'xticks': list(enumerate(labels))}\n return (data,), style, dict(dimensions=[format_kdims, element.vdims[0]], **ticks)\n\n def 
teardown_handles(self):\n box_artists = ('cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars', 'bodies')\n violin_artists = ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means')\n for group in box_artists+violin_artists:\n for v in self.handles.get(group, []):\n v.remove()\n", "path": "holoviews/plotting/mpl/stats.py"}], "after_files": [{"content": "import param\nimport numpy as np\n\nfrom ...core.ndmapping import sorted_context\nfrom .chart import AreaPlot, ChartPlot\nfrom .path import PolygonPlot\nfrom .plot import AdjoinedPlot\n\n\nclass DistributionPlot(AreaPlot):\n \"\"\"\n DistributionPlot visualizes a distribution of values as a KDE.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=True, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n\nclass BivariatePlot(PolygonPlot):\n \"\"\"\n Bivariate plot visualizes two-dimensional kernel density\n estimates. Additionally, by enabling the joint option, the\n marginals distributions can be plotted alongside each axis (does\n not animate or compose).\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n\nclass BoxPlot(ChartPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n style_opts = ['notch', 'sym', 'whis', 'bootstrap',\n 'conf_intervals', 'widths', 'showmeans',\n 'show_caps', 'showfliers', 'boxprops',\n 'whiskerprops', 'capprops', 'flierprops',\n 'medianprops', 'meanprops', 'meanline']\n\n _nonvectorized_styles = style_opts\n\n _plot_methods = dict(single='boxplot')\n\n def get_extents(self, element, ranges, range_type='combined'):\n return super().get_extents(\n element, ranges, range_type, 'categorical', element.vdims[0]\n )\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels = [], []\n for key, group in groups:\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n d = group[group.vdims[0]]\n data.append(d[np.isfinite(d)])\n labels.append(label)\n style['labels'] = labels\n style = {k: v for k, v in style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n return (data,), style, {'dimensions': [format_kdims, element.vdims[0]]}\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n artists = ax.boxplot(*plot_args, **plot_kwargs)\n artists['artist'] = artists['boxes']\n return artists\n\n def teardown_handles(self):\n for g in ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means'):\n for v in self.handles.get(g, []):\n v.remove()\n\n\nclass SideBoxPlot(AdjoinedPlot, BoxPlot):\n\n bgcolor = 
param.Parameter(default=(1, 1, 1, 0), doc=\"\"\"\n Make plot background invisible.\"\"\")\n\n border_size = param.Number(default=0, doc=\"\"\"\n The size of the border expressed as a fraction of the main plot.\"\"\")\n\n xaxis = param.ObjectSelector(default='bare',\n objects=['top', 'bottom', 'bare', 'top-bare',\n 'bottom-bare', None], doc=\"\"\"\n Whether and where to display the xaxis, bare options allow suppressing\n all axis labels including ticks and xlabel. Valid options are 'top',\n 'bottom', 'bare', 'top-bare' and 'bottom-bare'.\"\"\")\n\n yaxis = param.ObjectSelector(default='bare',\n objects=['left', 'right', 'bare', 'left-bare',\n 'right-bare', None], doc=\"\"\"\n Whether and where to display the yaxis, bare options allow suppressing\n all axis labels including ticks and ylabel. Valid options are 'left',\n 'right', 'bare' 'left-bare' and 'right-bare'.\"\"\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.adjoined:\n self.invert_axes = not self.invert_axes\n\n\nclass ViolinPlot(BoxPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n Allows supplying explicit bandwidth value rather than relying\n on scott or silverman method.\"\"\")\n\n inner = param.ObjectSelector(objects=['box', 'medians', None],\n default='box', doc=\"\"\"\n Inner visual indicator for distribution values:\n\n * box - A small box plot\n * stick - Lines indicating each sample value\n * quartiles - Indicates first, second and third quartiles\n \"\"\")\n\n _plot_methods = dict(single='violinplot')\n\n style_opts = ['showmeans', 'facecolors', 'showextrema', 'bw_method',\n 'widths', 'stats_color', 'box_color', 'alpha', 'edgecolors']\n\n _nonvectorized_styles = [\n s for s in style_opts if s not in ('facecolors', 'edgecolors', 'widths')\n ]\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n box_color = plot_kwargs.pop('box_color', 'black')\n stats_color = plot_kwargs.pop('stats_color', 'black')\n facecolors = plot_kwargs.pop('facecolors', [])\n edgecolors = plot_kwargs.pop('edgecolors', 'black')\n labels = plot_kwargs.pop('labels')\n alpha = plot_kwargs.pop('alpha', 1.)\n showmedians = self.inner == 'medians'\n bw_method = self.bandwidth or 'scott'\n artists = ax.violinplot(*plot_args, bw_method=bw_method,\n showmedians=showmedians, **plot_kwargs)\n if self.inner == 'box':\n box = ax.boxplot(*plot_args, positions=plot_kwargs['positions'],\n showfliers=False, showcaps=False, patch_artist=True,\n boxprops={'facecolor': box_color},\n medianprops={'color': 'white'}, widths=0.1,\n labels=labels)\n artists.update(box)\n for body, color in zip(artists['bodies'], facecolors):\n body.set_facecolors(color)\n body.set_edgecolors(edgecolors)\n body.set_alpha(alpha)\n for stat in ['cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars']:\n if stat in artists:\n artists[stat].set_edgecolors(stats_color)\n artists['bodies'] = artists['bodies']\n return artists\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels, colors = [], [], []\n elstyle = self.lookup_options(element, 'style')\n for i, (key, group) in enumerate(groups):\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n d = group[group.vdims[0]]\n 
data.append(d[np.isfinite(d)])\n labels.append(label)\n colors.append(elstyle[i].get('facecolors', 'blue'))\n style['positions'] = list(range(len(data)))\n style['labels'] = labels\n style['facecolors'] = colors\n\n if element.ndims > 0:\n element = element.aggregate(function=np.mean)\n else:\n element = element.clone([(element.aggregate(function=np.mean),)])\n\n new_style = self._apply_transforms(element, ranges, style)\n style = {k: v for k, v in new_style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n ticks = {'yticks' if self.invert_axes else 'xticks': list(enumerate(labels))}\n return (data,), style, dict(dimensions=[format_kdims, element.vdims[0]], **ticks)\n\n def teardown_handles(self):\n box_artists = ('cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars', 'bodies')\n violin_artists = ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means')\n for group in box_artists+violin_artists:\n for v in self.handles.get(group, []):\n v.remove()\n", "path": "holoviews/plotting/mpl/stats.py"}]} | 3,175 | 164 |
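The fix in this record drops non-finite values before each group is handed to matplotlib's `boxplot`, which is why the plot was empty whenever a group contained `NaN`. A minimal, self-contained sketch of that filtering step, using toy data modelled on the issue's `carrier`/`depdelay` example (the frame below is illustrative, not the airline dataset itself):

```python
import numpy as np
import pandas as pd

# Toy frame standing in for the airline subset from the issue; the NaNs in
# "depdelay" are what previously produced an empty Box-Whisker plot.
df = pd.DataFrame({
    "carrier": ["AA", "AA", "US", "US", "OH"],
    "depdelay": [5.0, np.nan, 12.0, 3.0, np.nan],
})

# Same idea as the patched BoxPlot.get_data: keep only finite values per group
# so matplotlib receives clean arrays.
data, labels = [], []
for carrier, group in df.groupby("carrier"):
    d = group["depdelay"].to_numpy()
    data.append(d[np.isfinite(d)])
    labels.append(carrier)

# matplotlib's ax.boxplot(data) would then draw one box per carrier,
# with `labels` used for the category ticks.
```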
gh_patches_debug_1285 | rasdani/github-patches | git_diff | openai__gym-2633 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug Report] Empty print version warning
**Describe the bug**
When I import gym, there's an empty line printed.
It's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30
Either it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:` which is false for both `None` and `""` (empty string).
Currently it's cluttering the logs at best, or masking some other issue.
**Code example**
```python
import gym
```
**System Info**
Describe the characteristic of your environment:
Latest gym installed from pip, Ubuntu 20.04, Python 3.9.7
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/__init__.py`
Content:
```
1 from gym import error
2 from gym.version import VERSION as __version__
3
4 from gym.core import (
5 Env,
6 Wrapper,
7 ObservationWrapper,
8 ActionWrapper,
9 RewardWrapper,
10 )
11 from gym.spaces import Space
12 from gym.envs import make, spec, register
13 from gym import logger
14 from gym import vector
15 from gym import wrappers
16 import os
17
18
19 __all__ = ["Env", "Space", "Wrapper", "make", "spec", "register"]
20
21 os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
22
23 try:
24 import gym_notices.notices as notices
25 import sys
26
27 # print version warning if necessary
28 notice = notices.notices.get(__version__)
29 if notice is not None:
30 print(notice, file=sys.stderr)
31
32 except Exception: # nosec
33 pass
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/__init__.py b/gym/__init__.py
--- a/gym/__init__.py
+++ b/gym/__init__.py
@@ -26,7 +26,7 @@
# print version warning if necessary
notice = notices.notices.get(__version__)
- if notice is not None:
+ if notice:
print(notice, file=sys.stderr)
except Exception: # nosec
| {"golden_diff": "diff --git a/gym/__init__.py b/gym/__init__.py\n--- a/gym/__init__.py\n+++ b/gym/__init__.py\n@@ -26,7 +26,7 @@\n \n # print version warning if necessary\n notice = notices.notices.get(__version__)\n- if notice is not None:\n+ if notice:\n print(notice, file=sys.stderr)\n \n except Exception: # nosec\n", "issue": "[Bug Report] Empty print version warning \n**Describe the bug**\r\nWhen I import gym, there's an empty line printed.\r\nIt's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30\r\n\r\nEither it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:` which is false for both `None` and `\"\"` (empty string).\r\n\r\nCurrently it's cluttering the logs at best, or masking some other issue.\r\n\r\n**Code example**\r\n```python\r\nimport gym\r\n```\r\n\r\n**System Info**\r\nDescribe the characteristic of your environment:\r\nLatest gym installed from pip, Ubuntu 20.04, Python 3.9.7\r\n\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice is not None:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n", "path": "gym/__init__.py"}], "after_files": [{"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n", "path": "gym/__init__.py"}]} | 691 | 98 |
gh_patches_debug_14124 | rasdani/github-patches | git_diff | pypa__pipenv-5628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pipenv check looks at installed packages, not Pipfile.lock
## Use Case
I would like to run `pipenv check` as a separate job from the build/test job inside a CI pipeline without rebuilding the environment. I discovered that I must actually install all packages into a `pipenv` environment before using `pipenv check`. Ideally, I should be able to scan the dependencies inside `Pipfile.lock` without actually installing the whole environment.
I believe it's misleading that right now `pipenv` is just acting as a "proxy" to `safety` and by default checks an environment that may not match `Pipfile.lock`. By using `pipenv check`, the assumption should be that it is checking the environment specified in `Pipfile.lock`; if you need to check an environment that deviates, you use `safety` directly.
I've traced the behavior down to these lines:
https://github.com/pypa/pipenv/blob/8939c863464b23b5503569669d1c3f9ad31a498f/pipenv/core.py#L2900-L2902
Instead of generating the temp `requirements.txt` file from the current environment using `pip list`, can we instead generate the temp `requirements.txt` from `Pipfile.lock`? Something like
```python
# this command should also respect the wishes of the --dev argument, if provided. Unsure on specifics of implementation
target_venv_packages = run_command(
_cmd + ["-m", "pipenv", "requirements"], is_verbose=project.s.is_verbose()
)
```
## Workaround
I'm currently using the following workaround in my CI job, but would like to go through `pipenv` directly.
```bash
pipenv requirements --dev | safety check --stdin
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/routines/check.py`
Content:
```
1 import io
2 import json as simplejson
3 import os
4 import sys
5 import tempfile
6 from pathlib import Path
7
8 from pipenv import exceptions, pep508checker
9 from pipenv.utils.processes import run_command
10 from pipenv.utils.project import ensure_project
11 from pipenv.utils.shell import cmd_list_to_shell, project_python
12 from pipenv.vendor import click, plette
13
14
15 def do_check(
16 project,
17 python=False,
18 system=False,
19 db=None,
20 ignore=None,
21 output="screen",
22 key=None,
23 quiet=False,
24 exit_code=True,
25 policy_file="",
26 save_json="",
27 audit_and_monitor=True,
28 safety_project=None,
29 pypi_mirror=None,
30 use_installed=False,
31 categories="",
32 ):
33 import json
34
35 if not system:
36 # Ensure that virtualenv is available.
37 ensure_project(
38 project,
39 python=python,
40 validate=False,
41 warn=False,
42 pypi_mirror=pypi_mirror,
43 )
44 if not quiet and not project.s.is_quiet():
45 click.secho("Checking PEP 508 requirements...", bold=True)
46 pep508checker_path = pep508checker.__file__.rstrip("cdo")
47 safety_path = os.path.join(
48 os.path.dirname(os.path.abspath(__file__)), "patched", "safety"
49 )
50 _cmd = [project_python(project, system=system)]
51 # Run the PEP 508 checker in the virtualenv.
52 cmd = _cmd + [Path(pep508checker_path).as_posix()]
53 c = run_command(cmd, is_verbose=project.s.is_verbose())
54 results = []
55 if c.returncode is not None:
56 try:
57 results = simplejson.loads(c.stdout.strip())
58 except json.JSONDecodeError:
59 click.echo(
60 "{}\n{}\n{}".format(
61 click.style(
62 "Failed parsing pep508 results: ",
63 fg="white",
64 bold=True,
65 ),
66 c.stdout.strip(),
67 c.stderr.strip(),
68 )
69 )
70 sys.exit(1)
71 # Load the pipfile.
72 p = plette.Pipfile.load(open(project.pipfile_location))
73 p = plette.Lockfile.with_meta_from(p)
74 failed = False
75 # Assert each specified requirement.
76 for marker, specifier in p._data["_meta"]["requires"].items():
77 if marker in results:
78 try:
79 assert results[marker] == specifier
80 except AssertionError:
81 failed = True
82 click.echo(
83 "Specifier {} does not match {} ({})."
84 "".format(
85 click.style(marker, fg="green"),
86 click.style(specifier, fg="cyan"),
87 click.style(results[marker], fg="yellow"),
88 ),
89 err=True,
90 )
91 if failed:
92 click.secho("Failed!", fg="red", err=True)
93 sys.exit(1)
94 else:
95 if not quiet and not project.s.is_quiet():
96 click.secho("Passed!", fg="green")
97 if not quiet and not project.s.is_quiet():
98 click.secho(
99 "Checking installed packages for vulnerabilities...",
100 bold=True,
101 )
102 if ignore:
103 if not isinstance(ignore, (tuple, list)):
104 ignore = [ignore]
105 ignored = [["--ignore", cve] for cve in ignore]
106 if not quiet and not project.s.is_quiet():
107 click.echo(
108 "Notice: Ignoring Vulnerabilit{} {}".format(
109 "ies" if len(ignored) > 1 else "y",
110 click.style(", ".join(ignore), fg="yellow"),
111 ),
112 err=True,
113 )
114 else:
115 ignored = []
116
117 options = [
118 "--audit-and-monitor" if audit_and_monitor else "--disable-audit-and-monitor",
119 "--exit-code" if exit_code else "--continue-on-error",
120 ]
121
122 if output == "full-report":
123 options.append("--full-report")
124 elif output == "minimal":
125 options.append("--json")
126 elif output not in ["screen", "default"]:
127 options.append(f"--output={output}")
128
129 if save_json:
130 options.append(f"--save-json={save_json}")
131
132 if policy_file:
133 options.append(f"--policy-file={policy_file}")
134
135 if safety_project:
136 options.append(f"--project={safety_project}")
137
138 if use_installed:
139 target_venv_packages = run_command(
140 _cmd + ["-m", "pip", "list", "--format=freeze"],
141 is_verbose=project.s.is_verbose(),
142 )
143 elif categories:
144 target_venv_packages = run_command(
145 ["pipenv", "requirements", "--categories", categories],
146 is_verbose=project.s.is_verbose(),
147 )
148 else:
149 target_venv_packages = run_command(
150 ["pipenv", "requirements"], is_verbose=project.s.is_verbose()
151 )
152
153 temp_requirements = tempfile.NamedTemporaryFile(
154 mode="w+",
155 prefix=f"{project.virtualenv_name}",
156 suffix="_requirements.txt",
157 delete=False,
158 )
159 temp_requirements.write(target_venv_packages.stdout.strip())
160 temp_requirements.close()
161
162 options.extend(["--file", temp_requirements.name])
163
164 cmd = _cmd + [safety_path, "check"] + options
165
166 if db:
167 if not quiet and not project.s.is_quiet():
168 click.echo(f"Using {db} database")
169 cmd.append(f"--db={db}")
170 elif key or project.s.PIPENV_PYUP_API_KEY:
171 cmd = cmd + [f"--key={key or project.s.PIPENV_PYUP_API_KEY}"]
172 else:
173 PIPENV_SAFETY_DB = (
174 "https://d2qjmgddvqvu75.cloudfront.net/aws/safety/pipenv/1.0.0/"
175 )
176 os.environ["SAFETY_ANNOUNCEMENTS_URL"] = f"{PIPENV_SAFETY_DB}announcements.json"
177 cmd.append(f"--db={PIPENV_SAFETY_DB}")
178
179 if ignored:
180 for cve in ignored:
181 cmd += cve
182
183 os.environ["SAFETY_CUSTOM_INTEGRATION"] = "True"
184 os.environ["SAFETY_SOURCE"] = "pipenv"
185 os.environ["SAFETY_PURE_YAML"] = "True"
186
187 from pipenv.patched.safety.cli import cli
188
189 sys.argv = cmd[1:]
190
191 if output == "minimal":
192 from contextlib import redirect_stderr, redirect_stdout
193
194 code = 0
195
196 with redirect_stdout(io.StringIO()) as out, redirect_stderr(io.StringIO()) as err:
197 try:
198 cli(prog_name="pipenv")
199 except SystemExit as exit_signal:
200 code = exit_signal.code
201
202 report = out.getvalue()
203 error = err.getvalue()
204
205 try:
206 json_report = simplejson.loads(report)
207 except Exception:
208 raise exceptions.PipenvCmdError(
209 cmd_list_to_shell(cmd), report, error, exit_code=code
210 )
211 meta = json_report.get("report_meta")
212 vulnerabilities_found = meta.get("vulnerabilities_found")
213
214 fg = "green"
215 message = "All good!"
216 db_type = "commercial" if meta.get("api_key", False) else "free"
217
218 if vulnerabilities_found >= 0:
219 fg = "red"
220 message = (
221 f"Scan was complete using Safety’s {db_type} vulnerability database."
222 )
223
224 click.echo()
225 click.secho(f"{vulnerabilities_found} vulnerabilities found.", fg=fg)
226 click.echo()
227
228 vulnerabilities = json_report.get("vulnerabilities", [])
229
230 for vuln in vulnerabilities:
231 click.echo(
232 "{}: {} {} open to vulnerability {} ({}). More info: {}".format(
233 click.style(vuln["vulnerability_id"], bold=True, fg="red"),
234 click.style(vuln["package_name"], fg="green"),
235 click.style(vuln["analyzed_version"], fg="yellow", bold=True),
236 click.style(vuln["vulnerability_id"], bold=True),
237 click.style(vuln["vulnerable_spec"], fg="yellow", bold=False),
238 click.style(vuln["more_info_url"], bold=True),
239 )
240 )
241 click.echo(f"{vuln['advisory']}")
242 click.echo()
243
244 click.secho(message, fg="white", bold=True)
245 sys.exit(code)
246
247 cli(prog_name="pipenv")
248
249 temp_requirements.remove()
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pipenv/routines/check.py b/pipenv/routines/check.py
--- a/pipenv/routines/check.py
+++ b/pipenv/routines/check.py
@@ -95,10 +95,16 @@
if not quiet and not project.s.is_quiet():
click.secho("Passed!", fg="green")
if not quiet and not project.s.is_quiet():
- click.secho(
- "Checking installed packages for vulnerabilities...",
- bold=True,
- )
+ if use_installed:
+ click.secho(
+ "Checking installed packages for vulnerabilities...",
+ bold=True,
+ )
+ else:
+ click.secho(
+ "Checking Pipfile.lock packages for vulnerabilities...",
+ bold=True,
+ )
if ignore:
if not isinstance(ignore, (tuple, list)):
ignore = [ignore]
| {"golden_diff": "diff --git a/pipenv/routines/check.py b/pipenv/routines/check.py\n--- a/pipenv/routines/check.py\n+++ b/pipenv/routines/check.py\n@@ -95,10 +95,16 @@\n if not quiet and not project.s.is_quiet():\n click.secho(\"Passed!\", fg=\"green\")\n if not quiet and not project.s.is_quiet():\n- click.secho(\n- \"Checking installed packages for vulnerabilities...\",\n- bold=True,\n- )\n+ if use_installed:\n+ click.secho(\n+ \"Checking installed packages for vulnerabilities...\",\n+ bold=True,\n+ )\n+ else:\n+ click.secho(\n+ \"Checking Pipfile.lock packages for vulnerabilities...\",\n+ bold=True,\n+ )\n if ignore:\n if not isinstance(ignore, (tuple, list)):\n ignore = [ignore]\n", "issue": "pipenv check looks at installed packages, not Pipfile.lock\n## Use Case\r\n\r\nI would like to run `pipenv check` as a separate job from the build/test job inside a CI pipeline without rebuilding environment. I discovered that I must actually install all packages to a `pipenv` environment before using `pipenv check`. Ideally, I should be able to scan the dependencies inside `Pipfile.lock` without actually installing the whole environment. \r\n\r\nI believe its misleading that right now `pipenv` is just acting as a \"proxy\" to `safety`, and by default checks an environment that may not match `Pipfile.lock`. By using `pipenv check` the assumption should be that it is checking the environment specified in `Pipfile.lock` and if you need to check an environment that deviates, you use `safety` directly.\r\n\r\nI've traced the behavior down to these lines:\r\nhttps://github.com/pypa/pipenv/blob/8939c863464b23b5503569669d1c3f9ad31a498f/pipenv/core.py#L2900-L2902\r\n\r\nInstead of generating the temp `requirements.txt` file from the current environment using `pip list`, can we instead generate the temp `requirements.txt` from `Pipfile.lock`? Something like\r\n\r\n```python\r\n# this command should also respect the wishes of the --dev argument, if provided. Unsure on specifics of implementation\r\ntarget_venv_packages = run_command(\r\n _cmd + [\"-m\", \"pipenv\", \"requirements\"], is_verbose=project.s.is_verbose()\r\n )\r\n```\r\n\r\n## Workaround\r\n\r\nI'm currently using the following workaround in my CI job, but would like to go through `pipenv` directly. 
\r\n\r\n```bash\r\npipenv requirements --dev | safety check --stdin\r\n```\n", "before_files": [{"content": "import io\nimport json as simplejson\nimport os\nimport sys\nimport tempfile\nfrom pathlib import Path\n\nfrom pipenv import exceptions, pep508checker\nfrom pipenv.utils.processes import run_command\nfrom pipenv.utils.project import ensure_project\nfrom pipenv.utils.shell import cmd_list_to_shell, project_python\nfrom pipenv.vendor import click, plette\n\n\ndef do_check(\n project,\n python=False,\n system=False,\n db=None,\n ignore=None,\n output=\"screen\",\n key=None,\n quiet=False,\n exit_code=True,\n policy_file=\"\",\n save_json=\"\",\n audit_and_monitor=True,\n safety_project=None,\n pypi_mirror=None,\n use_installed=False,\n categories=\"\",\n):\n import json\n\n if not system:\n # Ensure that virtualenv is available.\n ensure_project(\n project,\n python=python,\n validate=False,\n warn=False,\n pypi_mirror=pypi_mirror,\n )\n if not quiet and not project.s.is_quiet():\n click.secho(\"Checking PEP 508 requirements...\", bold=True)\n pep508checker_path = pep508checker.__file__.rstrip(\"cdo\")\n safety_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"patched\", \"safety\"\n )\n _cmd = [project_python(project, system=system)]\n # Run the PEP 508 checker in the virtualenv.\n cmd = _cmd + [Path(pep508checker_path).as_posix()]\n c = run_command(cmd, is_verbose=project.s.is_verbose())\n results = []\n if c.returncode is not None:\n try:\n results = simplejson.loads(c.stdout.strip())\n except json.JSONDecodeError:\n click.echo(\n \"{}\\n{}\\n{}\".format(\n click.style(\n \"Failed parsing pep508 results: \",\n fg=\"white\",\n bold=True,\n ),\n c.stdout.strip(),\n c.stderr.strip(),\n )\n )\n sys.exit(1)\n # Load the pipfile.\n p = plette.Pipfile.load(open(project.pipfile_location))\n p = plette.Lockfile.with_meta_from(p)\n failed = False\n # Assert each specified requirement.\n for marker, specifier in p._data[\"_meta\"][\"requires\"].items():\n if marker in results:\n try:\n assert results[marker] == specifier\n except AssertionError:\n failed = True\n click.echo(\n \"Specifier {} does not match {} ({}).\"\n \"\".format(\n click.style(marker, fg=\"green\"),\n click.style(specifier, fg=\"cyan\"),\n click.style(results[marker], fg=\"yellow\"),\n ),\n err=True,\n )\n if failed:\n click.secho(\"Failed!\", fg=\"red\", err=True)\n sys.exit(1)\n else:\n if not quiet and not project.s.is_quiet():\n click.secho(\"Passed!\", fg=\"green\")\n if not quiet and not project.s.is_quiet():\n click.secho(\n \"Checking installed packages for vulnerabilities...\",\n bold=True,\n )\n if ignore:\n if not isinstance(ignore, (tuple, list)):\n ignore = [ignore]\n ignored = [[\"--ignore\", cve] for cve in ignore]\n if not quiet and not project.s.is_quiet():\n click.echo(\n \"Notice: Ignoring Vulnerabilit{} {}\".format(\n \"ies\" if len(ignored) > 1 else \"y\",\n click.style(\", \".join(ignore), fg=\"yellow\"),\n ),\n err=True,\n )\n else:\n ignored = []\n\n options = [\n \"--audit-and-monitor\" if audit_and_monitor else \"--disable-audit-and-monitor\",\n \"--exit-code\" if exit_code else \"--continue-on-error\",\n ]\n\n if output == \"full-report\":\n options.append(\"--full-report\")\n elif output == \"minimal\":\n options.append(\"--json\")\n elif output not in [\"screen\", \"default\"]:\n options.append(f\"--output={output}\")\n\n if save_json:\n options.append(f\"--save-json={save_json}\")\n\n if policy_file:\n options.append(f\"--policy-file={policy_file}\")\n\n if safety_project:\n 
options.append(f\"--project={safety_project}\")\n\n if use_installed:\n target_venv_packages = run_command(\n _cmd + [\"-m\", \"pip\", \"list\", \"--format=freeze\"],\n is_verbose=project.s.is_verbose(),\n )\n elif categories:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\", \"--categories\", categories],\n is_verbose=project.s.is_verbose(),\n )\n else:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\"], is_verbose=project.s.is_verbose()\n )\n\n temp_requirements = tempfile.NamedTemporaryFile(\n mode=\"w+\",\n prefix=f\"{project.virtualenv_name}\",\n suffix=\"_requirements.txt\",\n delete=False,\n )\n temp_requirements.write(target_venv_packages.stdout.strip())\n temp_requirements.close()\n\n options.extend([\"--file\", temp_requirements.name])\n\n cmd = _cmd + [safety_path, \"check\"] + options\n\n if db:\n if not quiet and not project.s.is_quiet():\n click.echo(f\"Using {db} database\")\n cmd.append(f\"--db={db}\")\n elif key or project.s.PIPENV_PYUP_API_KEY:\n cmd = cmd + [f\"--key={key or project.s.PIPENV_PYUP_API_KEY}\"]\n else:\n PIPENV_SAFETY_DB = (\n \"https://d2qjmgddvqvu75.cloudfront.net/aws/safety/pipenv/1.0.0/\"\n )\n os.environ[\"SAFETY_ANNOUNCEMENTS_URL\"] = f\"{PIPENV_SAFETY_DB}announcements.json\"\n cmd.append(f\"--db={PIPENV_SAFETY_DB}\")\n\n if ignored:\n for cve in ignored:\n cmd += cve\n\n os.environ[\"SAFETY_CUSTOM_INTEGRATION\"] = \"True\"\n os.environ[\"SAFETY_SOURCE\"] = \"pipenv\"\n os.environ[\"SAFETY_PURE_YAML\"] = \"True\"\n\n from pipenv.patched.safety.cli import cli\n\n sys.argv = cmd[1:]\n\n if output == \"minimal\":\n from contextlib import redirect_stderr, redirect_stdout\n\n code = 0\n\n with redirect_stdout(io.StringIO()) as out, redirect_stderr(io.StringIO()) as err:\n try:\n cli(prog_name=\"pipenv\")\n except SystemExit as exit_signal:\n code = exit_signal.code\n\n report = out.getvalue()\n error = err.getvalue()\n\n try:\n json_report = simplejson.loads(report)\n except Exception:\n raise exceptions.PipenvCmdError(\n cmd_list_to_shell(cmd), report, error, exit_code=code\n )\n meta = json_report.get(\"report_meta\")\n vulnerabilities_found = meta.get(\"vulnerabilities_found\")\n\n fg = \"green\"\n message = \"All good!\"\n db_type = \"commercial\" if meta.get(\"api_key\", False) else \"free\"\n\n if vulnerabilities_found >= 0:\n fg = \"red\"\n message = (\n f\"Scan was complete using Safety\u2019s {db_type} vulnerability database.\"\n )\n\n click.echo()\n click.secho(f\"{vulnerabilities_found} vulnerabilities found.\", fg=fg)\n click.echo()\n\n vulnerabilities = json_report.get(\"vulnerabilities\", [])\n\n for vuln in vulnerabilities:\n click.echo(\n \"{}: {} {} open to vulnerability {} ({}). 
More info: {}\".format(\n click.style(vuln[\"vulnerability_id\"], bold=True, fg=\"red\"),\n click.style(vuln[\"package_name\"], fg=\"green\"),\n click.style(vuln[\"analyzed_version\"], fg=\"yellow\", bold=True),\n click.style(vuln[\"vulnerability_id\"], bold=True),\n click.style(vuln[\"vulnerable_spec\"], fg=\"yellow\", bold=False),\n click.style(vuln[\"more_info_url\"], bold=True),\n )\n )\n click.echo(f\"{vuln['advisory']}\")\n click.echo()\n\n click.secho(message, fg=\"white\", bold=True)\n sys.exit(code)\n\n cli(prog_name=\"pipenv\")\n\n temp_requirements.remove()\n", "path": "pipenv/routines/check.py"}], "after_files": [{"content": "import io\nimport json as simplejson\nimport os\nimport sys\nimport tempfile\nfrom pathlib import Path\n\nfrom pipenv import exceptions, pep508checker\nfrom pipenv.utils.processes import run_command\nfrom pipenv.utils.project import ensure_project\nfrom pipenv.utils.shell import cmd_list_to_shell, project_python\nfrom pipenv.vendor import click, plette\n\n\ndef do_check(\n project,\n python=False,\n system=False,\n db=None,\n ignore=None,\n output=\"screen\",\n key=None,\n quiet=False,\n exit_code=True,\n policy_file=\"\",\n save_json=\"\",\n audit_and_monitor=True,\n safety_project=None,\n pypi_mirror=None,\n use_installed=False,\n categories=\"\",\n):\n import json\n\n if not system:\n # Ensure that virtualenv is available.\n ensure_project(\n project,\n python=python,\n validate=False,\n warn=False,\n pypi_mirror=pypi_mirror,\n )\n if not quiet and not project.s.is_quiet():\n click.secho(\"Checking PEP 508 requirements...\", bold=True)\n pep508checker_path = pep508checker.__file__.rstrip(\"cdo\")\n safety_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"patched\", \"safety\"\n )\n _cmd = [project_python(project, system=system)]\n # Run the PEP 508 checker in the virtualenv.\n cmd = _cmd + [Path(pep508checker_path).as_posix()]\n c = run_command(cmd, is_verbose=project.s.is_verbose())\n results = []\n if c.returncode is not None:\n try:\n results = simplejson.loads(c.stdout.strip())\n except json.JSONDecodeError:\n click.echo(\n \"{}\\n{}\\n{}\".format(\n click.style(\n \"Failed parsing pep508 results: \",\n fg=\"white\",\n bold=True,\n ),\n c.stdout.strip(),\n c.stderr.strip(),\n )\n )\n sys.exit(1)\n # Load the pipfile.\n p = plette.Pipfile.load(open(project.pipfile_location))\n p = plette.Lockfile.with_meta_from(p)\n failed = False\n # Assert each specified requirement.\n for marker, specifier in p._data[\"_meta\"][\"requires\"].items():\n if marker in results:\n try:\n assert results[marker] == specifier\n except AssertionError:\n failed = True\n click.echo(\n \"Specifier {} does not match {} ({}).\"\n \"\".format(\n click.style(marker, fg=\"green\"),\n click.style(specifier, fg=\"cyan\"),\n click.style(results[marker], fg=\"yellow\"),\n ),\n err=True,\n )\n if failed:\n click.secho(\"Failed!\", fg=\"red\", err=True)\n sys.exit(1)\n else:\n if not quiet and not project.s.is_quiet():\n click.secho(\"Passed!\", fg=\"green\")\n if not quiet and not project.s.is_quiet():\n if use_installed:\n click.secho(\n \"Checking installed packages for vulnerabilities...\",\n bold=True,\n )\n else:\n click.secho(\n \"Checking Pipfile.lock packages for vulnerabilities...\",\n bold=True,\n )\n if ignore:\n if not isinstance(ignore, (tuple, list)):\n ignore = [ignore]\n ignored = [[\"--ignore\", cve] for cve in ignore]\n if not quiet and not project.s.is_quiet():\n click.echo(\n \"Notice: Ignoring Vulnerabilit{} {}\".format(\n \"ies\" if len(ignored) 
> 1 else \"y\",\n click.style(\", \".join(ignore), fg=\"yellow\"),\n ),\n err=True,\n )\n else:\n ignored = []\n\n options = [\n \"--audit-and-monitor\" if audit_and_monitor else \"--disable-audit-and-monitor\",\n \"--exit-code\" if exit_code else \"--continue-on-error\",\n ]\n\n if output == \"full-report\":\n options.append(\"--full-report\")\n elif output == \"minimal\":\n options.append(\"--json\")\n elif output not in [\"screen\", \"default\"]:\n options.append(f\"--output={output}\")\n\n if save_json:\n options.append(f\"--save-json={save_json}\")\n\n if policy_file:\n options.append(f\"--policy-file={policy_file}\")\n\n if safety_project:\n options.append(f\"--project={safety_project}\")\n\n if use_installed:\n target_venv_packages = run_command(\n _cmd + [\"-m\", \"pip\", \"list\", \"--format=freeze\"],\n is_verbose=project.s.is_verbose(),\n )\n elif categories:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\", \"--categories\", categories],\n is_verbose=project.s.is_verbose(),\n )\n else:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\"], is_verbose=project.s.is_verbose()\n )\n\n temp_requirements = tempfile.NamedTemporaryFile(\n mode=\"w+\",\n prefix=f\"{project.virtualenv_name}\",\n suffix=\"_requirements.txt\",\n delete=False,\n )\n temp_requirements.write(target_venv_packages.stdout.strip())\n temp_requirements.close()\n\n options.extend([\"--file\", temp_requirements.name])\n\n cmd = _cmd + [safety_path, \"check\"] + options\n\n if db:\n if not quiet and not project.s.is_quiet():\n click.echo(f\"Using {db} database\")\n cmd.append(f\"--db={db}\")\n elif key or project.s.PIPENV_PYUP_API_KEY:\n cmd = cmd + [f\"--key={key or project.s.PIPENV_PYUP_API_KEY}\"]\n else:\n PIPENV_SAFETY_DB = (\n \"https://d2qjmgddvqvu75.cloudfront.net/aws/safety/pipenv/1.0.0/\"\n )\n os.environ[\"SAFETY_ANNOUNCEMENTS_URL\"] = f\"{PIPENV_SAFETY_DB}announcements.json\"\n cmd.append(f\"--db={PIPENV_SAFETY_DB}\")\n\n if ignored:\n for cve in ignored:\n cmd += cve\n\n os.environ[\"SAFETY_CUSTOM_INTEGRATION\"] = \"True\"\n os.environ[\"SAFETY_SOURCE\"] = \"pipenv\"\n os.environ[\"SAFETY_PURE_YAML\"] = \"True\"\n\n from pipenv.patched.safety.cli import cli\n\n sys.argv = cmd[1:]\n\n if output == \"minimal\":\n from contextlib import redirect_stderr, redirect_stdout\n\n code = 0\n\n with redirect_stdout(io.StringIO()) as out, redirect_stderr(io.StringIO()) as err:\n try:\n cli(prog_name=\"pipenv\")\n except SystemExit as exit_signal:\n code = exit_signal.code\n\n report = out.getvalue()\n error = err.getvalue()\n\n try:\n json_report = simplejson.loads(report)\n except Exception:\n raise exceptions.PipenvCmdError(\n cmd_list_to_shell(cmd), report, error, exit_code=code\n )\n meta = json_report.get(\"report_meta\")\n vulnerabilities_found = meta.get(\"vulnerabilities_found\")\n\n fg = \"green\"\n message = \"All good!\"\n db_type = \"commercial\" if meta.get(\"api_key\", False) else \"free\"\n\n if vulnerabilities_found >= 0:\n fg = \"red\"\n message = (\n f\"Scan was complete using Safety\u2019s {db_type} vulnerability database.\"\n )\n\n click.echo()\n click.secho(f\"{vulnerabilities_found} vulnerabilities found.\", fg=fg)\n click.echo()\n\n vulnerabilities = json_report.get(\"vulnerabilities\", [])\n\n for vuln in vulnerabilities:\n click.echo(\n \"{}: {} {} open to vulnerability {} ({}). 
More info: {}\".format(\n click.style(vuln[\"vulnerability_id\"], bold=True, fg=\"red\"),\n click.style(vuln[\"package_name\"], fg=\"green\"),\n click.style(vuln[\"analyzed_version\"], fg=\"yellow\", bold=True),\n click.style(vuln[\"vulnerability_id\"], bold=True),\n click.style(vuln[\"vulnerable_spec\"], fg=\"yellow\", bold=False),\n click.style(vuln[\"more_info_url\"], bold=True),\n )\n )\n click.echo(f\"{vuln['advisory']}\")\n click.echo()\n\n click.secho(message, fg=\"white\", bold=True)\n sys.exit(code)\n\n cli(prog_name=\"pipenv\")\n\n temp_requirements.remove()\n", "path": "pipenv/routines/check.py"}]} | 3,119 | 191 |
gh_patches_debug_29316 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v7 reverse proxying tcp protocols crashing
#### Problem Description
i'm trying to proxy tcp protocols and periodically getting a crash:
./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181
```console
% ./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181
Proxy server listening at http://*:8181
[::1]:64076: client connect
[::1]:64076: server connect pop.gmail.com (172.253.123.109:995)
[::1]:64076: mitmproxy has crashed!
Traceback (most recent call last):
File "mitmproxy/proxy/server.py", line 279, in server_event
File "mitmproxy/proxy/layer.py", line 144, in handle_event
File "mitmproxy/proxy/layer.py", line 144, in handle_event
File "mitmproxy/proxy/tunnel.py", line 72, in _handle_event
File "mitmproxy/proxy/layers/tls.py", line 240, in receive_data
File "mitmproxy/proxy/layers/tls.py", line 306, in event_to_child
File "mitmproxy/proxy/tunnel.py", line 104, in event_to_child
File "mitmproxy/proxy/layer.py", line 144, in handle_event
File "mitmproxy/proxy/tunnel.py", line 72, in _handle_event
File "mitmproxy/proxy/layers/tls.py", line 225, in receive_data
AttributeError: 'NoneType' object has no attribute 'bio_write'
[::1]:64076: client disconnect
[::1]:64076: closing transports...
[::1]:64076: server disconnect pop.gmail.com (172.253.123.109:995)
[::1]:64076: transports closed!
[::1]:64180: client connect
[::1]:64180: server connect pop.gmail.com (172.253.123.109:995)
[::1]:64180 <- tcp <- pop.gmail.com:995
+OK Gpop ready for requests from 24.137.254.57 s4mb63968478vsi
[::1]:64180 -> tcp -> pop.gmail.com:995
CAPA
[::1]:64180 <- tcp <- pop.gmail.com:995
+OK Capability list follows
USER
RESP-CODES
EXPIRE 0
LOGIN-DELAY 300
TOP
UIDL
X-GOOGLE-RICO
SASL PLAIN XOAUTH2 OAUTHBEARER
.
[::1]:64180: half-closing Server(pop.gmail.com:995, state=open, tls, src_port=64181)
[::1]:64180: server disconnect pop.gmail.com (172.253.123.109:995)
[::1]:64180: client disconnect
[::1]:64180: closing transports...
[::1]:64180: transports closed!
```
#### System Information
% ./mitmdump --version
Mitmproxy: 7.0.0.dev binary
Python: 3.9.5
OpenSSL: OpenSSL 1.1.1k 25 Mar 2021
Platform: macOS-11.2.3-x86_64-i386-64bit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/next_layer.py`
Content:
```
1 import re
2 from typing import Type, Sequence, Union, Tuple, Any, Iterable, Optional, List
3
4 from mitmproxy import ctx, exceptions, connection
5 from mitmproxy.net.tls import is_tls_record_magic
6 from mitmproxy.proxy.layers.http import HTTPMode
7 from mitmproxy.proxy import context, layer, layers
8 from mitmproxy.proxy.layers import modes
9 from mitmproxy.proxy.layers.tls import HTTP_ALPNS, parse_client_hello
10
11 LayerCls = Type[layer.Layer]
12
13
14 def stack_match(
15 context: context.Context,
16 layers: Sequence[Union[LayerCls, Tuple[LayerCls, ...]]]
17 ) -> bool:
18 if len(context.layers) != len(layers):
19 return False
20 return all(
21 expected is Any or isinstance(actual, expected)
22 for actual, expected in zip(context.layers, layers)
23 )
24
25
26 class NextLayer:
27 ignore_hosts: Iterable[re.Pattern] = ()
28 allow_hosts: Iterable[re.Pattern] = ()
29 tcp_hosts: Iterable[re.Pattern] = ()
30
31 def configure(self, updated):
32 if "tcp_hosts" in updated:
33 self.tcp_hosts = [
34 re.compile(x, re.IGNORECASE) for x in ctx.options.tcp_hosts
35 ]
36 if "allow_hosts" in updated or "ignore_hosts" in updated:
37 if ctx.options.allow_hosts and ctx.options.ignore_hosts:
38 raise exceptions.OptionsError("The allow_hosts and ignore_hosts options are mutually exclusive.")
39 self.ignore_hosts = [
40 re.compile(x, re.IGNORECASE) for x in ctx.options.ignore_hosts
41 ]
42 self.allow_hosts = [
43 re.compile(x, re.IGNORECASE) for x in ctx.options.allow_hosts
44 ]
45
46 def ignore_connection(self, server_address: Optional[connection.Address], data_client: bytes) -> Optional[bool]:
47 """
48 Returns:
49 True, if the connection should be ignored.
50 False, if it should not be ignored.
51 None, if we need to wait for more input data.
52 """
53 if not ctx.options.ignore_hosts and not ctx.options.allow_hosts:
54 return False
55
56 hostnames: List[str] = []
57 if server_address is not None:
58 hostnames.append(server_address[0])
59 if is_tls_record_magic(data_client):
60 try:
61 ch = parse_client_hello(data_client)
62 if ch is None: # not complete yet
63 return None
64 sni = ch.sni
65 except ValueError:
66 pass
67 else:
68 if sni:
69 hostnames.append(sni)
70
71 if not hostnames:
72 return False
73
74 if ctx.options.ignore_hosts:
75 return any(
76 re.search(rex, host, re.IGNORECASE)
77 for host in hostnames
78 for rex in ctx.options.ignore_hosts
79 )
80 elif ctx.options.allow_hosts:
81 return not any(
82 re.search(rex, host, re.IGNORECASE)
83 for host in hostnames
84 for rex in ctx.options.allow_hosts
85 )
86 else: # pragma: no cover
87 raise AssertionError()
88
89 def next_layer(self, nextlayer: layer.NextLayer):
90 nextlayer.layer = self._next_layer(
91 nextlayer.context,
92 nextlayer.data_client(),
93 nextlayer.data_server(),
94 )
95
96 def _next_layer(self, context: context.Context, data_client: bytes, data_server: bytes) -> Optional[layer.Layer]:
97 if len(context.layers) == 0:
98 return self.make_top_layer(context)
99
100 if len(data_client) < 3 and not data_server:
101 return None
102
103 client_tls = is_tls_record_magic(data_client)
104
105 def s(*layers):
106 return stack_match(context, layers)
107
108 # 1. check for --ignore/--allow
109 ignore = self.ignore_connection(context.server.address, data_client)
110 if ignore is True:
111 return layers.TCPLayer(context, ignore=True)
112 if ignore is None:
113 return None
114
115 # 2. Check for TLS
116 if client_tls:
117 # client tls usually requires a server tls layer as parent layer, except:
118 # - reverse proxy mode manages this itself.
119 # - a secure web proxy doesn't have a server part.
120 if s(modes.ReverseProxy) or s(modes.HttpProxy):
121 return layers.ClientTLSLayer(context)
122 else:
123 # We already assign the next layer here os that ServerTLSLayer
124 # knows that it can safely wait for a ClientHello.
125 ret = layers.ServerTLSLayer(context)
126 ret.child_layer = layers.ClientTLSLayer(context)
127 return ret
128
129 # 3. Setup the HTTP layer for a regular HTTP proxy or an upstream proxy.
130 if any([
131 s(modes.HttpProxy),
132 # or a "Secure Web Proxy", see https://www.chromium.org/developers/design-documents/secure-web-proxy
133 s(modes.HttpProxy, layers.ClientTLSLayer),
134 ]):
135 if ctx.options.mode == "regular":
136 return layers.HttpLayer(context, HTTPMode.regular)
137 else:
138 return layers.HttpLayer(context, HTTPMode.upstream)
139
140 # 4. Check for --tcp
141 if any(
142 (context.server.address and rex.search(context.server.address[0])) or
143 (context.client.sni and rex.search(context.client.sni))
144 for rex in self.tcp_hosts
145 ):
146 return layers.TCPLayer(context)
147
148 # 5. Check for raw tcp mode.
149 very_likely_http = (
150 context.client.alpn and context.client.alpn in HTTP_ALPNS
151 )
152 probably_no_http = not very_likely_http and (
153 not data_client[:3].isalpha() # the first three bytes should be the HTTP verb, so A-Za-z is expected.
154 or data_server # a server greeting would be uncharacteristic.
155 )
156 if ctx.options.rawtcp and probably_no_http:
157 return layers.TCPLayer(context)
158
159 # 6. Assume HTTP by default.
160 return layers.HttpLayer(context, HTTPMode.transparent)
161
162 def make_top_layer(self, context: context.Context) -> layer.Layer:
163 if ctx.options.mode == "regular" or ctx.options.mode.startswith("upstream:"):
164 return layers.modes.HttpProxy(context)
165
166 elif ctx.options.mode == "transparent":
167 return layers.modes.TransparentProxy(context)
168
169 elif ctx.options.mode.startswith("reverse:"):
170 return layers.modes.ReverseProxy(context)
171
172 elif ctx.options.mode == "socks5":
173 return layers.modes.Socks5Proxy(context)
174
175 else: # pragma: no cover
176 raise AssertionError("Unknown mode.")
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/next_layer.py b/mitmproxy/addons/next_layer.py
--- a/mitmproxy/addons/next_layer.py
+++ b/mitmproxy/addons/next_layer.py
@@ -115,9 +115,13 @@
# 2. Check for TLS
if client_tls:
# client tls usually requires a server tls layer as parent layer, except:
- # - reverse proxy mode manages this itself.
# - a secure web proxy doesn't have a server part.
- if s(modes.ReverseProxy) or s(modes.HttpProxy):
+ # - reverse proxy mode manages this itself.
+ if (
+ s(modes.HttpProxy) or
+ s(modes.ReverseProxy) or
+ s(modes.ReverseProxy, layers.ServerTLSLayer)
+ ):
return layers.ClientTLSLayer(context)
else:
# We already assign the next layer here os that ServerTLSLayer
@@ -127,11 +131,11 @@
return ret
# 3. Setup the HTTP layer for a regular HTTP proxy or an upstream proxy.
- if any([
- s(modes.HttpProxy),
+ if (
+ s(modes.HttpProxy) or
# or a "Secure Web Proxy", see https://www.chromium.org/developers/design-documents/secure-web-proxy
- s(modes.HttpProxy, layers.ClientTLSLayer),
- ]):
+ s(modes.HttpProxy, layers.ClientTLSLayer)
+ ):
if ctx.options.mode == "regular":
return layers.HttpLayer(context, HTTPMode.regular)
else:
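A toy sketch of why the added `s(modes.ReverseProxy, layers.ServerTLSLayer)` branch matters (plain Python stand-ins, not mitmproxy's real classes): with `--mode reverse:https://...` and `connection_strategy=eager`, the upstream TLS connection has already pushed a `ServerTLSLayer` onto the context by the time the client's ClientHello arrives, so the old condition fell through to the branch that nests a second `ServerTLSLayer`, which presumably is what later fails with the `bio_write` AttributeError.

```python
# Hypothetical stand-ins for the layer classes; stack_is mirrors stack_match from next_layer.py above.
class ReverseProxy: ...
class ServerTLSLayer: ...
class HttpProxy: ...

def stack_is(stack, expected):
    return len(stack) == len(expected) and all(isinstance(a, e) for a, e in zip(stack, expected))

# Layer stack for an eager reverse proxy to a TLS upstream when the client's TLS hello arrives:
stack = [ReverseProxy(), ServerTLSLayer()]

old_condition = stack_is(stack, (ReverseProxy,)) or stack_is(stack, (HttpProxy,))
new_condition = old_condition or stack_is(stack, (ReverseProxy, ServerTLSLayer))

print(old_condition, new_condition)  # False True -> only the patched check picks ClientTLSLayer directly
```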
| {"golden_diff": "diff --git a/mitmproxy/addons/next_layer.py b/mitmproxy/addons/next_layer.py\n--- a/mitmproxy/addons/next_layer.py\n+++ b/mitmproxy/addons/next_layer.py\n@@ -115,9 +115,13 @@\n # 2. Check for TLS\n if client_tls:\n # client tls usually requires a server tls layer as parent layer, except:\n- # - reverse proxy mode manages this itself.\n # - a secure web proxy doesn't have a server part.\n- if s(modes.ReverseProxy) or s(modes.HttpProxy):\n+ # - reverse proxy mode manages this itself.\n+ if (\n+ s(modes.HttpProxy) or\n+ s(modes.ReverseProxy) or\n+ s(modes.ReverseProxy, layers.ServerTLSLayer)\n+ ):\n return layers.ClientTLSLayer(context)\n else:\n # We already assign the next layer here os that ServerTLSLayer\n@@ -127,11 +131,11 @@\n return ret\n \n # 3. Setup the HTTP layer for a regular HTTP proxy or an upstream proxy.\n- if any([\n- s(modes.HttpProxy),\n+ if (\n+ s(modes.HttpProxy) or\n # or a \"Secure Web Proxy\", see https://www.chromium.org/developers/design-documents/secure-web-proxy\n- s(modes.HttpProxy, layers.ClientTLSLayer),\n- ]):\n+ s(modes.HttpProxy, layers.ClientTLSLayer)\n+ ):\n if ctx.options.mode == \"regular\":\n return layers.HttpLayer(context, HTTPMode.regular)\n else:\n", "issue": "v7 reverse proxying tcp protocols crashing\n#### Problem Description\r\n\r\ni'm trying to proxy tcp protocols and periodically getting a crash:\r\n\r\n./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181\r\n\r\n```console\r\n% ./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181\r\nProxy server listening at http://*:8181\r\n[::1]:64076: client connect\r\n[::1]:64076: server connect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64076: mitmproxy has crashed!\r\nTraceback (most recent call last):\r\n File \"mitmproxy/proxy/server.py\", line 279, in server_event\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/tunnel.py\", line 72, in _handle_event\r\n File \"mitmproxy/proxy/layers/tls.py\", line 240, in receive_data\r\n File \"mitmproxy/proxy/layers/tls.py\", line 306, in event_to_child\r\n File \"mitmproxy/proxy/tunnel.py\", line 104, in event_to_child\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/tunnel.py\", line 72, in _handle_event\r\n File \"mitmproxy/proxy/layers/tls.py\", line 225, in receive_data\r\nAttributeError: 'NoneType' object has no attribute 'bio_write'\r\n\r\n[::1]:64076: client disconnect\r\n[::1]:64076: closing transports...\r\n[::1]:64076: server disconnect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64076: transports closed!\r\n[::1]:64180: client connect\r\n[::1]:64180: server connect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64180 <- tcp <- pop.gmail.com:995\r\n\r\n +OK Gpop ready for requests from 24.137.254.57 s4mb63968478vsi\r\n\r\n[::1]:64180 -> tcp -> pop.gmail.com:995\r\n\r\n CAPA\r\n\r\n[::1]:64180 <- tcp <- pop.gmail.com:995\r\n\r\n +OK Capability list follows\r\n USER\r\n RESP-CODES\r\n EXPIRE 0\r\n LOGIN-DELAY 300\r\n TOP\r\n UIDL\r\n X-GOOGLE-RICO\r\n SASL PLAIN XOAUTH2 OAUTHBEARER\r\n .\r\n\r\n[::1]:64180: half-closing Server(pop.gmail.com:995, state=open, tls, src_port=64181)\r\n[::1]:64180: server disconnect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64180: client disconnect\r\n[::1]:64180: 
closing transports...\r\n[::1]:64180: transports closed!\r\n```\r\n\r\n#### System Information\r\n\r\n% ./mitmdump --version\r\nMitmproxy: 7.0.0.dev binary\r\nPython: 3.9.5\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: macOS-11.2.3-x86_64-i386-64bit\r\n\nv7 reverse proxying tcp protocols crashing\n#### Problem Description\r\n\r\ni'm trying to proxy tcp protocols and periodically getting a crash:\r\n\r\n./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181\r\n\r\n```console\r\n% ./mitmdump --set termlog_verbosity=debug --mode reverse:https://pop.gmail.com:995 --set connection_strategy=eager -w mail --set flow_detail=3 -p 8181\r\nProxy server listening at http://*:8181\r\n[::1]:64076: client connect\r\n[::1]:64076: server connect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64076: mitmproxy has crashed!\r\nTraceback (most recent call last):\r\n File \"mitmproxy/proxy/server.py\", line 279, in server_event\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/tunnel.py\", line 72, in _handle_event\r\n File \"mitmproxy/proxy/layers/tls.py\", line 240, in receive_data\r\n File \"mitmproxy/proxy/layers/tls.py\", line 306, in event_to_child\r\n File \"mitmproxy/proxy/tunnel.py\", line 104, in event_to_child\r\n File \"mitmproxy/proxy/layer.py\", line 144, in handle_event\r\n File \"mitmproxy/proxy/tunnel.py\", line 72, in _handle_event\r\n File \"mitmproxy/proxy/layers/tls.py\", line 225, in receive_data\r\nAttributeError: 'NoneType' object has no attribute 'bio_write'\r\n\r\n[::1]:64076: client disconnect\r\n[::1]:64076: closing transports...\r\n[::1]:64076: server disconnect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64076: transports closed!\r\n[::1]:64180: client connect\r\n[::1]:64180: server connect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64180 <- tcp <- pop.gmail.com:995\r\n\r\n +OK Gpop ready for requests from 24.137.254.57 s4mb63968478vsi\r\n\r\n[::1]:64180 -> tcp -> pop.gmail.com:995\r\n\r\n CAPA\r\n\r\n[::1]:64180 <- tcp <- pop.gmail.com:995\r\n\r\n +OK Capability list follows\r\n USER\r\n RESP-CODES\r\n EXPIRE 0\r\n LOGIN-DELAY 300\r\n TOP\r\n UIDL\r\n X-GOOGLE-RICO\r\n SASL PLAIN XOAUTH2 OAUTHBEARER\r\n .\r\n\r\n[::1]:64180: half-closing Server(pop.gmail.com:995, state=open, tls, src_port=64181)\r\n[::1]:64180: server disconnect pop.gmail.com (172.253.123.109:995)\r\n[::1]:64180: client disconnect\r\n[::1]:64180: closing transports...\r\n[::1]:64180: transports closed!\r\n```\r\n\r\n#### System Information\r\n\r\n% ./mitmdump --version\r\nMitmproxy: 7.0.0.dev binary\r\nPython: 3.9.5\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: macOS-11.2.3-x86_64-i386-64bit\r\n\n", "before_files": [{"content": "import re\nfrom typing import Type, Sequence, Union, Tuple, Any, Iterable, Optional, List\n\nfrom mitmproxy import ctx, exceptions, connection\nfrom mitmproxy.net.tls import is_tls_record_magic\nfrom mitmproxy.proxy.layers.http import HTTPMode\nfrom mitmproxy.proxy import context, layer, layers\nfrom mitmproxy.proxy.layers import modes\nfrom mitmproxy.proxy.layers.tls import HTTP_ALPNS, parse_client_hello\n\nLayerCls = Type[layer.Layer]\n\n\ndef stack_match(\n context: context.Context,\n layers: Sequence[Union[LayerCls, Tuple[LayerCls, ...]]]\n) -> bool:\n if len(context.layers) != len(layers):\n return False\n return all(\n expected is Any or isinstance(actual, expected)\n for 
actual, expected in zip(context.layers, layers)\n )\n\n\nclass NextLayer:\n ignore_hosts: Iterable[re.Pattern] = ()\n allow_hosts: Iterable[re.Pattern] = ()\n tcp_hosts: Iterable[re.Pattern] = ()\n\n def configure(self, updated):\n if \"tcp_hosts\" in updated:\n self.tcp_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.tcp_hosts\n ]\n if \"allow_hosts\" in updated or \"ignore_hosts\" in updated:\n if ctx.options.allow_hosts and ctx.options.ignore_hosts:\n raise exceptions.OptionsError(\"The allow_hosts and ignore_hosts options are mutually exclusive.\")\n self.ignore_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.ignore_hosts\n ]\n self.allow_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.allow_hosts\n ]\n\n def ignore_connection(self, server_address: Optional[connection.Address], data_client: bytes) -> Optional[bool]:\n \"\"\"\n Returns:\n True, if the connection should be ignored.\n False, if it should not be ignored.\n None, if we need to wait for more input data.\n \"\"\"\n if not ctx.options.ignore_hosts and not ctx.options.allow_hosts:\n return False\n\n hostnames: List[str] = []\n if server_address is not None:\n hostnames.append(server_address[0])\n if is_tls_record_magic(data_client):\n try:\n ch = parse_client_hello(data_client)\n if ch is None: # not complete yet\n return None\n sni = ch.sni\n except ValueError:\n pass\n else:\n if sni:\n hostnames.append(sni)\n\n if not hostnames:\n return False\n\n if ctx.options.ignore_hosts:\n return any(\n re.search(rex, host, re.IGNORECASE)\n for host in hostnames\n for rex in ctx.options.ignore_hosts\n )\n elif ctx.options.allow_hosts:\n return not any(\n re.search(rex, host, re.IGNORECASE)\n for host in hostnames\n for rex in ctx.options.allow_hosts\n )\n else: # pragma: no cover\n raise AssertionError()\n\n def next_layer(self, nextlayer: layer.NextLayer):\n nextlayer.layer = self._next_layer(\n nextlayer.context,\n nextlayer.data_client(),\n nextlayer.data_server(),\n )\n\n def _next_layer(self, context: context.Context, data_client: bytes, data_server: bytes) -> Optional[layer.Layer]:\n if len(context.layers) == 0:\n return self.make_top_layer(context)\n\n if len(data_client) < 3 and not data_server:\n return None\n\n client_tls = is_tls_record_magic(data_client)\n\n def s(*layers):\n return stack_match(context, layers)\n\n # 1. check for --ignore/--allow\n ignore = self.ignore_connection(context.server.address, data_client)\n if ignore is True:\n return layers.TCPLayer(context, ignore=True)\n if ignore is None:\n return None\n\n # 2. Check for TLS\n if client_tls:\n # client tls usually requires a server tls layer as parent layer, except:\n # - reverse proxy mode manages this itself.\n # - a secure web proxy doesn't have a server part.\n if s(modes.ReverseProxy) or s(modes.HttpProxy):\n return layers.ClientTLSLayer(context)\n else:\n # We already assign the next layer here os that ServerTLSLayer\n # knows that it can safely wait for a ClientHello.\n ret = layers.ServerTLSLayer(context)\n ret.child_layer = layers.ClientTLSLayer(context)\n return ret\n\n # 3. Setup the HTTP layer for a regular HTTP proxy or an upstream proxy.\n if any([\n s(modes.HttpProxy),\n # or a \"Secure Web Proxy\", see https://www.chromium.org/developers/design-documents/secure-web-proxy\n s(modes.HttpProxy, layers.ClientTLSLayer),\n ]):\n if ctx.options.mode == \"regular\":\n return layers.HttpLayer(context, HTTPMode.regular)\n else:\n return layers.HttpLayer(context, HTTPMode.upstream)\n\n # 4. 
Check for --tcp\n if any(\n (context.server.address and rex.search(context.server.address[0])) or\n (context.client.sni and rex.search(context.client.sni))\n for rex in self.tcp_hosts\n ):\n return layers.TCPLayer(context)\n\n # 5. Check for raw tcp mode.\n very_likely_http = (\n context.client.alpn and context.client.alpn in HTTP_ALPNS\n )\n probably_no_http = not very_likely_http and (\n not data_client[:3].isalpha() # the first three bytes should be the HTTP verb, so A-Za-z is expected.\n or data_server # a server greeting would be uncharacteristic.\n )\n if ctx.options.rawtcp and probably_no_http:\n return layers.TCPLayer(context)\n\n # 6. Assume HTTP by default.\n return layers.HttpLayer(context, HTTPMode.transparent)\n\n def make_top_layer(self, context: context.Context) -> layer.Layer:\n if ctx.options.mode == \"regular\" or ctx.options.mode.startswith(\"upstream:\"):\n return layers.modes.HttpProxy(context)\n\n elif ctx.options.mode == \"transparent\":\n return layers.modes.TransparentProxy(context)\n\n elif ctx.options.mode.startswith(\"reverse:\"):\n return layers.modes.ReverseProxy(context)\n\n elif ctx.options.mode == \"socks5\":\n return layers.modes.Socks5Proxy(context)\n\n else: # pragma: no cover\n raise AssertionError(\"Unknown mode.\")\n", "path": "mitmproxy/addons/next_layer.py"}], "after_files": [{"content": "import re\nfrom typing import Type, Sequence, Union, Tuple, Any, Iterable, Optional, List\n\nfrom mitmproxy import ctx, exceptions, connection\nfrom mitmproxy.net.tls import is_tls_record_magic\nfrom mitmproxy.proxy.layers.http import HTTPMode\nfrom mitmproxy.proxy import context, layer, layers\nfrom mitmproxy.proxy.layers import modes\nfrom mitmproxy.proxy.layers.tls import HTTP_ALPNS, parse_client_hello\n\nLayerCls = Type[layer.Layer]\n\n\ndef stack_match(\n context: context.Context,\n layers: Sequence[Union[LayerCls, Tuple[LayerCls, ...]]]\n) -> bool:\n if len(context.layers) != len(layers):\n return False\n return all(\n expected is Any or isinstance(actual, expected)\n for actual, expected in zip(context.layers, layers)\n )\n\n\nclass NextLayer:\n ignore_hosts: Iterable[re.Pattern] = ()\n allow_hosts: Iterable[re.Pattern] = ()\n tcp_hosts: Iterable[re.Pattern] = ()\n\n def configure(self, updated):\n if \"tcp_hosts\" in updated:\n self.tcp_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.tcp_hosts\n ]\n if \"allow_hosts\" in updated or \"ignore_hosts\" in updated:\n if ctx.options.allow_hosts and ctx.options.ignore_hosts:\n raise exceptions.OptionsError(\"The allow_hosts and ignore_hosts options are mutually exclusive.\")\n self.ignore_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.ignore_hosts\n ]\n self.allow_hosts = [\n re.compile(x, re.IGNORECASE) for x in ctx.options.allow_hosts\n ]\n\n def ignore_connection(self, server_address: Optional[connection.Address], data_client: bytes) -> Optional[bool]:\n \"\"\"\n Returns:\n True, if the connection should be ignored.\n False, if it should not be ignored.\n None, if we need to wait for more input data.\n \"\"\"\n if not ctx.options.ignore_hosts and not ctx.options.allow_hosts:\n return False\n\n hostnames: List[str] = []\n if server_address is not None:\n hostnames.append(server_address[0])\n if is_tls_record_magic(data_client):\n try:\n ch = parse_client_hello(data_client)\n if ch is None: # not complete yet\n return None\n sni = ch.sni\n except ValueError:\n pass\n else:\n if sni:\n hostnames.append(sni)\n\n if not hostnames:\n return False\n\n if ctx.options.ignore_hosts:\n return 
any(\n re.search(rex, host, re.IGNORECASE)\n for host in hostnames\n for rex in ctx.options.ignore_hosts\n )\n elif ctx.options.allow_hosts:\n return not any(\n re.search(rex, host, re.IGNORECASE)\n for host in hostnames\n for rex in ctx.options.allow_hosts\n )\n else: # pragma: no cover\n raise AssertionError()\n\n def next_layer(self, nextlayer: layer.NextLayer):\n nextlayer.layer = self._next_layer(\n nextlayer.context,\n nextlayer.data_client(),\n nextlayer.data_server(),\n )\n\n def _next_layer(self, context: context.Context, data_client: bytes, data_server: bytes) -> Optional[layer.Layer]:\n if len(context.layers) == 0:\n return self.make_top_layer(context)\n\n if len(data_client) < 3 and not data_server:\n return None\n\n client_tls = is_tls_record_magic(data_client)\n\n def s(*layers):\n return stack_match(context, layers)\n\n # 1. check for --ignore/--allow\n ignore = self.ignore_connection(context.server.address, data_client)\n if ignore is True:\n return layers.TCPLayer(context, ignore=True)\n if ignore is None:\n return None\n\n # 2. Check for TLS\n if client_tls:\n # client tls usually requires a server tls layer as parent layer, except:\n # - a secure web proxy doesn't have a server part.\n # - reverse proxy mode manages this itself.\n if (\n s(modes.HttpProxy) or\n s(modes.ReverseProxy) or\n s(modes.ReverseProxy, layers.ServerTLSLayer)\n ):\n return layers.ClientTLSLayer(context)\n else:\n # We already assign the next layer here os that ServerTLSLayer\n # knows that it can safely wait for a ClientHello.\n ret = layers.ServerTLSLayer(context)\n ret.child_layer = layers.ClientTLSLayer(context)\n return ret\n\n # 3. Setup the HTTP layer for a regular HTTP proxy or an upstream proxy.\n if (\n s(modes.HttpProxy) or\n # or a \"Secure Web Proxy\", see https://www.chromium.org/developers/design-documents/secure-web-proxy\n s(modes.HttpProxy, layers.ClientTLSLayer)\n ):\n if ctx.options.mode == \"regular\":\n return layers.HttpLayer(context, HTTPMode.regular)\n else:\n return layers.HttpLayer(context, HTTPMode.upstream)\n\n # 4. Check for --tcp\n if any(\n (context.server.address and rex.search(context.server.address[0])) or\n (context.client.sni and rex.search(context.client.sni))\n for rex in self.tcp_hosts\n ):\n return layers.TCPLayer(context)\n\n # 5. Check for raw tcp mode.\n very_likely_http = (\n context.client.alpn and context.client.alpn in HTTP_ALPNS\n )\n probably_no_http = not very_likely_http and (\n not data_client[:3].isalpha() # the first three bytes should be the HTTP verb, so A-Za-z is expected.\n or data_server # a server greeting would be uncharacteristic.\n )\n if ctx.options.rawtcp and probably_no_http:\n return layers.TCPLayer(context)\n\n # 6. Assume HTTP by default.\n return layers.HttpLayer(context, HTTPMode.transparent)\n\n def make_top_layer(self, context: context.Context) -> layer.Layer:\n if ctx.options.mode == \"regular\" or ctx.options.mode.startswith(\"upstream:\"):\n return layers.modes.HttpProxy(context)\n\n elif ctx.options.mode == \"transparent\":\n return layers.modes.TransparentProxy(context)\n\n elif ctx.options.mode.startswith(\"reverse:\"):\n return layers.modes.ReverseProxy(context)\n\n elif ctx.options.mode == \"socks5\":\n return layers.modes.Socks5Proxy(context)\n\n else: # pragma: no cover\n raise AssertionError(\"Unknown mode.\")\n", "path": "mitmproxy/addons/next_layer.py"}]} | 3,970 | 363 |
gh_patches_debug_67195 | rasdani/github-patches | git_diff | flairNLP__flair-3123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Avg Pooling in the Entity Linker
### Describe the bug
A runtime error is raised upon prediction when using "average" as the pooling operation in the Entity Linker
### To Reproduce
```python
from flair.data import Corpus
from flair.datasets import NEL_ENGLISH_TWEEKI
from flair.embeddings import TransformerWordEmbeddings
from flair.models import EntityLinker
corpus: Corpus = NEL_ENGLISH_TWEEKI(sample_missing_splits=False)
embeddings = TransformerWordEmbeddings(
model="distilbert-base-uncased",
fine_tune=True,
)
entity_linker = EntityLinker(
embeddings=embeddings,
label_dictionary=corpus.make_label_dictionary(label_type="nel"),
label_type="nel",
pooling_operation="average",
)
entity_linker.predict(corpus.train[0])
```
### Expected behaivor
The Entity Linker should be able to perform average pooling without any issues, like in the case of other pooling options.
### Logs and Stack traces
```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In [1], line 20
8 embeddings = TransformerWordEmbeddings(
9 model="distilbert-base-uncased",
10 fine_tune=True,
11 )
13 entity_linker = EntityLinker(
14 embeddings=embeddings,
15 label_dictionary=corpus.make_label_dictionary(label_type="nel"),
16 label_type="nel",
17 pooling_operation="average",
18 )
---> 20 entity_linker.predict(corpus.train[0])
File ~/projects/flair_forked/flair/nn/model.py:826, in DefaultClassifier.predict(self, sentences, mini_batch_size, return_probabilities_for_all_classes, verbose, label_name, return_loss, embedding_storage_mode)
824 # pass data points through network and decode
825 data_point_tensor = self._encode_data_points(batch, data_points)
--> 826 scores = self.decoder(data_point_tensor)
827 scores = self._mask_scores(scores, data_points)
829 # if anything could possibly be predicted
File ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x4 and 768x650)
```
### Screenshots
_No response_
### Additional Context
_No response_
### Environment
#### Versions:
##### Flair
0.11.3
##### Pytorch
1.13.0
##### Transformers
4.24.0
#### GPU
False
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/entity_linker_model.py`
Content:
```
1 import logging
2 import re
3 from functools import lru_cache
4 from pathlib import Path
5 from typing import Any, Callable, Dict, List, Optional, Set, Union
6 from unicodedata import category
7
8 import torch
9
10 import flair.embeddings
11 import flair.nn
12 from flair.data import Dictionary, Sentence, Span
13 from flair.file_utils import cached_path
14
15 log = logging.getLogger("flair")
16
17
18 class CandidateGenerator:
19 """
20 Given a string, the CandidateGenerator returns possible target classes as candidates.
21 """
22
23 def __init__(self, candidates: Union[str, Dict], backoff: bool = True):
24 # internal candidate lists of generator
25 self.mention_to_candidates_map: Dict = {}
26
27 # load Zelda candidates if so passed
28 if isinstance(candidates, str) and candidates.lower() == "zelda":
29 zelda_path: str = "https://flair.informatik.hu-berlin.de/resources/datasets/zelda"
30 zelda_candidates = cached_path(f"{zelda_path}/zelda_mention_entities_counter.pickle", cache_dir="datasets")
31 import pickle
32
33 with open(zelda_candidates, "rb") as handle:
34 mention_entities_counter = pickle.load(handle)
35
36 # create candidate lists
37 candidate_lists = {}
38 for mention in mention_entities_counter:
39 candidate_lists[mention] = list(mention_entities_counter[mention].keys())
40
41 self.mention_to_candidates_map = candidate_lists
42
43 elif isinstance(candidates, Dict):
44 self.mention_to_candidates_map = candidates
45
46 # if lower casing is enabled, create candidate lists of lower cased versions
47 self.backoff = backoff
48 if self.backoff:
49 # create a new dictionary for lower cased mentions
50 lowercased_mention_to_candidates_map: Dict = {}
51
52 # go through each mention and its candidates
53 for mention, candidates in self.mention_to_candidates_map.items():
54 backoff_mention = self._make_backoff_string(mention)
55 # check if backoff mention already seen. If so, add candidates. Else, create new entry.
56 if backoff_mention in lowercased_mention_to_candidates_map:
57 current_candidates = lowercased_mention_to_candidates_map[backoff_mention]
58 lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(candidates)
59 else:
60 lowercased_mention_to_candidates_map[backoff_mention] = candidates
61
62 # set lowercased version as map
63 self.mention_to_candidates_map = lowercased_mention_to_candidates_map
64
65 @lru_cache(maxsize=50000)
66 def _make_backoff_string(self, mention: str) -> str:
67 backoff_mention = mention.lower()
68 backoff_mention = "".join(ch for ch in backoff_mention if category(ch)[0] not in "P")
69 backoff_mention = re.sub(" +", " ", backoff_mention)
70 return backoff_mention
71
72 def get_candidates(self, mention: str) -> Set[str]:
73 """Given a mention, this method returns a set of candidate classes"""
74 if self.backoff:
75 mention = self._make_backoff_string(mention)
76
77 return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()
78
79
80 class EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):
81 """
82 Entity Linking Model
83 The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.
84 To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.
85 The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.
86 """
87
88 def __init__(
89 self,
90 embeddings: flair.embeddings.TokenEmbeddings,
91 label_dictionary: Dictionary,
92 pooling_operation: str = "first_last",
93 label_type: str = "nel",
94 candidates: Optional[CandidateGenerator] = None,
95 **classifierargs,
96 ):
97 """
98 Initializes an EntityLinker
99 :param embeddings: embeddings used to embed the words/sentences
100 :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>
101 :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.
102 E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 'first&last' concatenates
103 the embedding of the first and the embedding of the last word.
104 :param label_type: name of the label you use.
105 """
106
107 super(EntityLinker, self).__init__(
108 embeddings=embeddings,
109 label_dictionary=label_dictionary,
110 final_embedding_size=embeddings.embedding_length * 2
111 if pooling_operation == "first_last"
112 else embeddings.embedding_length,
113 **classifierargs,
114 )
115
116 self.pooling_operation = pooling_operation
117 self._label_type = label_type
118
119 cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {
120 "average": self.emb_mean,
121 "first": self.emb_first,
122 "last": self.emb_last,
123 "first_last": self.emb_firstAndLast,
124 }
125
126 if pooling_operation not in cases:
127 raise KeyError('pooling_operation has to be one of "average", "first", "last" or "first_last"')
128
129 self.aggregated_embedding = cases[pooling_operation]
130
131 self.candidates = candidates
132
133 self.to(flair.device)
134
135 def emb_first(self, span: Span, embedding_names):
136 return span.tokens[0].get_embedding(embedding_names)
137
138 def emb_last(self, span: Span, embedding_names):
139 return span.tokens[-1].get_embedding(embedding_names)
140
141 def emb_firstAndLast(self, span: Span, embedding_names):
142 return torch.cat(
143 (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0
144 )
145
146 def emb_mean(self, span, embedding_names):
147 return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)
148
149 def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
150 return sentence.get_spans(self.label_type)
151
152 def _filter_data_point(self, data_point: Sentence) -> bool:
153 return bool(data_point.get_labels(self.label_type))
154
155 def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:
156 return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())
157
158 def _get_state_dict(self):
159 model_state = {
160 **super()._get_state_dict(),
161 "word_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
162 "label_type": self.label_type,
163 "label_dictionary": self.label_dictionary,
164 "pooling_operation": self.pooling_operation,
165 "loss_weights": self.weight_dict,
166 "candidates": self.candidates,
167 }
168 return model_state
169
170 def _print_predictions(self, batch, gold_label_type):
171 lines = []
172 for datapoint in batch:
173 eval_line = f"\n{datapoint.to_original_text()}\n"
174
175 for span in datapoint.get_spans(gold_label_type):
176 symbol = "✓" if span.get_label(gold_label_type).value == span.get_label("predicted").value else "❌"
177 eval_line += (
178 f' - "{span.text}" / {span.get_label(gold_label_type).value}'
179 f' --> {span.get_label("predicted").value} ({symbol})\n'
180 )
181
182 lines.append(eval_line)
183 return lines
184
185 @classmethod
186 def _init_model_with_state_dict(cls, state, **kwargs):
187 # remap state dict for models serialized with Flair <= 0.11.3
188 import re
189
190 state_dict = state["state_dict"]
191 for key in list(state_dict.keys()):
192 state_dict[re.sub("^word_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)
193
194 return super()._init_model_with_state_dict(
195 state,
196 embeddings=state.get("word_embeddings"),
197 label_dictionary=state.get("label_dictionary"),
198 label_type=state.get("label_type"),
199 pooling_operation=state.get("pooling_operation"),
200 loss_weights=state.get("loss_weights", {"<unk>": 0.3}),
201 candidates=state.get("candidates", None),
202 **kwargs,
203 )
204
205 @property
206 def label_type(self):
207 return self._label_type
208
209 def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):
210 if not self.candidates:
211 return scores
212
213 masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)
214
215 for idx, span in enumerate(data_points):
216 # get the candidates
217 candidate_set = self.candidates.get_candidates(span.text)
218 # during training, add the gold value as candidate
219 if self.training:
220 candidate_set.add(span.get_label(self.label_type).value)
221 candidate_set.add("<unk>")
222 indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]
223 masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]
224
225 return masked_scores
226
227 @classmethod
228 def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "EntityLinker":
229 from typing import cast
230
231 return cast("EntityLinker", super().load(model_path=model_path))
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flair/models/entity_linker_model.py b/flair/models/entity_linker_model.py
--- a/flair/models/entity_linker_model.py
+++ b/flair/models/entity_linker_model.py
@@ -144,7 +144,7 @@
)
def emb_mean(self, span, embedding_names):
- return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)
+ return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)
def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
return sentence.get_spans(self.label_type)
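A minimal sketch (PyTorch only, shapes assumed from the issue's distilbert setup) of why the one-word change fixes average pooling: `torch.cat` joins the per-token vectors along the embedding axis, so the mean collapses each span to a scalar, whereas `torch.stack` keeps a `[num_tokens, embedding_dim]` matrix whose mean is one embedding-sized vector. A sentence with several mentions then feeds a 1x4-style matrix instead of 4x768 into the decoder, which lines up with the `1x4 and 768x650` shapes in the traceback.

```python
import torch

# Four tokens in one mention, each with a 768-dim embedding (as with distilbert-base-uncased).
token_embeddings = [torch.randn(768) for _ in range(4)]

pooled_cat = torch.mean(torch.cat(token_embeddings, 0), 0)      # torch.Size([])   -> a single scalar
pooled_stack = torch.mean(torch.stack(token_embeddings, 0), 0)  # torch.Size([768]) -> one pooled embedding

print(pooled_cat.shape, pooled_stack.shape)
```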
| {"golden_diff": "diff --git a/flair/models/entity_linker_model.py b/flair/models/entity_linker_model.py\n--- a/flair/models/entity_linker_model.py\n+++ b/flair/models/entity_linker_model.py\n@@ -144,7 +144,7 @@\n )\n \n def emb_mean(self, span, embedding_names):\n- return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)\n+ return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)\n \n def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:\n return sentence.get_spans(self.label_type)\n", "issue": "[Bug]: Avg Pooling in the Entity Linker\n### Describe the bug\r\n\r\nA runtime error is raised upon prediction when using \"average\" as the pooling operation in the Entity Linker\r\n\r\n### To Reproduce\r\n\r\n```python\r\nfrom flair.data import Corpus\r\nfrom flair.datasets import NEL_ENGLISH_TWEEKI\r\nfrom flair.embeddings import TransformerWordEmbeddings\r\nfrom flair.models import EntityLinker\r\n\r\ncorpus: Corpus = NEL_ENGLISH_TWEEKI(sample_missing_splits=False)\r\n\r\nembeddings = TransformerWordEmbeddings(\r\n model=\"distilbert-base-uncased\",\r\n fine_tune=True,\r\n)\r\n\r\nentity_linker = EntityLinker(\r\n embeddings=embeddings,\r\n label_dictionary=corpus.make_label_dictionary(label_type=\"nel\"),\r\n label_type=\"nel\",\r\n pooling_operation=\"average\",\r\n)\r\n\r\nentity_linker.predict(corpus.train[0])\r\n```\r\n\r\n\r\n### Expected behaivor\r\n\r\nThe Entity Linker should be able to perform average pooling without any issues, like in the case of other pooling options.\r\n\r\n### Logs and Stack traces\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\nCell In [1], line 20\r\n 8 embeddings = TransformerWordEmbeddings(\r\n 9 model=\"distilbert-base-uncased\",\r\n 10 fine_tune=True,\r\n 11 )\r\n 13 entity_linker = EntityLinker(\r\n 14 embeddings=embeddings,\r\n 15 label_dictionary=corpus.make_label_dictionary(label_type=\"nel\"),\r\n 16 label_type=\"nel\",\r\n 17 pooling_operation=\"average\",\r\n 18 )\r\n---> 20 entity_linker.predict(corpus.train[0])\r\n\r\nFile ~/projects/flair_forked/flair/nn/model.py:826, in DefaultClassifier.predict(self, sentences, mini_batch_size, return_probabilities_for_all_classes, verbose, label_name, return_loss, embedding_storage_mode)\r\n 824 # pass data points through network and decode\r\n 825 data_point_tensor = self._encode_data_points(batch, data_points)\r\n--> 826 scores = self.decoder(data_point_tensor)\r\n 827 scores = self._mask_scores(scores, data_points)\r\n 829 # if anything could possibly be predicted\r\n\r\nFile ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)\r\n 1186 # If we don't have any hooks, we want to skip the rest of the logic in\r\n 1187 # this function, and just call forward.\r\n 1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\r\n 1189 or _global_forward_hooks or _global_forward_pre_hooks):\r\n-> 1190 return forward_call(*input, **kwargs)\r\n 1191 # Do not call functions when jit is used\r\n 1192 full_backward_hooks, non_full_backward_hooks = [], []\r\n\r\nFile ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)\r\n 113 def forward(self, input: Tensor) -> Tensor:\r\n--> 114 return F.linear(input, self.weight, self.bias)\r\n\r\nRuntimeError: 
mat1 and mat2 shapes cannot be multiplied (1x4 and 768x650)\r\n```\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n#### Versions:\r\n##### Flair\r\n0.11.3\r\n##### Pytorch\r\n1.13.0\r\n##### Transformers\r\n4.24.0\r\n#### GPU\r\nFalse\n", "before_files": [{"content": "import logging\nimport re\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\nfrom unicodedata import category\n\nimport torch\n\nimport flair.embeddings\nimport flair.nn\nfrom flair.data import Dictionary, Sentence, Span\nfrom flair.file_utils import cached_path\n\nlog = logging.getLogger(\"flair\")\n\n\nclass CandidateGenerator:\n \"\"\"\n Given a string, the CandidateGenerator returns possible target classes as candidates.\n \"\"\"\n\n def __init__(self, candidates: Union[str, Dict], backoff: bool = True):\n # internal candidate lists of generator\n self.mention_to_candidates_map: Dict = {}\n\n # load Zelda candidates if so passed\n if isinstance(candidates, str) and candidates.lower() == \"zelda\":\n zelda_path: str = \"https://flair.informatik.hu-berlin.de/resources/datasets/zelda\"\n zelda_candidates = cached_path(f\"{zelda_path}/zelda_mention_entities_counter.pickle\", cache_dir=\"datasets\")\n import pickle\n\n with open(zelda_candidates, \"rb\") as handle:\n mention_entities_counter = pickle.load(handle)\n\n # create candidate lists\n candidate_lists = {}\n for mention in mention_entities_counter:\n candidate_lists[mention] = list(mention_entities_counter[mention].keys())\n\n self.mention_to_candidates_map = candidate_lists\n\n elif isinstance(candidates, Dict):\n self.mention_to_candidates_map = candidates\n\n # if lower casing is enabled, create candidate lists of lower cased versions\n self.backoff = backoff\n if self.backoff:\n # create a new dictionary for lower cased mentions\n lowercased_mention_to_candidates_map: Dict = {}\n\n # go through each mention and its candidates\n for mention, candidates in self.mention_to_candidates_map.items():\n backoff_mention = self._make_backoff_string(mention)\n # check if backoff mention already seen. If so, add candidates. 
Else, create new entry.\n if backoff_mention in lowercased_mention_to_candidates_map:\n current_candidates = lowercased_mention_to_candidates_map[backoff_mention]\n lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(candidates)\n else:\n lowercased_mention_to_candidates_map[backoff_mention] = candidates\n\n # set lowercased version as map\n self.mention_to_candidates_map = lowercased_mention_to_candidates_map\n\n @lru_cache(maxsize=50000)\n def _make_backoff_string(self, mention: str) -> str:\n backoff_mention = mention.lower()\n backoff_mention = \"\".join(ch for ch in backoff_mention if category(ch)[0] not in \"P\")\n backoff_mention = re.sub(\" +\", \" \", backoff_mention)\n return backoff_mention\n\n def get_candidates(self, mention: str) -> Set[str]:\n \"\"\"Given a mention, this method returns a set of candidate classes\"\"\"\n if self.backoff:\n mention = self._make_backoff_string(mention)\n\n return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()\n\n\nclass EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):\n \"\"\"\n Entity Linking Model\n The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.\n To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.\n The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.\n \"\"\"\n\n def __init__(\n self,\n embeddings: flair.embeddings.TokenEmbeddings,\n label_dictionary: Dictionary,\n pooling_operation: str = \"first_last\",\n label_type: str = \"nel\",\n candidates: Optional[CandidateGenerator] = None,\n **classifierargs,\n ):\n \"\"\"\n Initializes an EntityLinker\n :param embeddings: embeddings used to embed the words/sentences\n :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>\n :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.\n E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 
'first&last' concatenates\n the embedding of the first and the embedding of the last word.\n :param label_type: name of the label you use.\n \"\"\"\n\n super(EntityLinker, self).__init__(\n embeddings=embeddings,\n label_dictionary=label_dictionary,\n final_embedding_size=embeddings.embedding_length * 2\n if pooling_operation == \"first_last\"\n else embeddings.embedding_length,\n **classifierargs,\n )\n\n self.pooling_operation = pooling_operation\n self._label_type = label_type\n\n cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {\n \"average\": self.emb_mean,\n \"first\": self.emb_first,\n \"last\": self.emb_last,\n \"first_last\": self.emb_firstAndLast,\n }\n\n if pooling_operation not in cases:\n raise KeyError('pooling_operation has to be one of \"average\", \"first\", \"last\" or \"first_last\"')\n\n self.aggregated_embedding = cases[pooling_operation]\n\n self.candidates = candidates\n\n self.to(flair.device)\n\n def emb_first(self, span: Span, embedding_names):\n return span.tokens[0].get_embedding(embedding_names)\n\n def emb_last(self, span: Span, embedding_names):\n return span.tokens[-1].get_embedding(embedding_names)\n\n def emb_firstAndLast(self, span: Span, embedding_names):\n return torch.cat(\n (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0\n )\n\n def emb_mean(self, span, embedding_names):\n return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)\n\n def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:\n return sentence.get_spans(self.label_type)\n\n def _filter_data_point(self, data_point: Sentence) -> bool:\n return bool(data_point.get_labels(self.label_type))\n\n def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:\n return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())\n\n def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"word_embeddings\": self.embeddings.save_embeddings(use_state_dict=False),\n \"label_type\": self.label_type,\n \"label_dictionary\": self.label_dictionary,\n \"pooling_operation\": self.pooling_operation,\n \"loss_weights\": self.weight_dict,\n \"candidates\": self.candidates,\n }\n return model_state\n\n def _print_predictions(self, batch, gold_label_type):\n lines = []\n for datapoint in batch:\n eval_line = f\"\\n{datapoint.to_original_text()}\\n\"\n\n for span in datapoint.get_spans(gold_label_type):\n symbol = \"\u2713\" if span.get_label(gold_label_type).value == span.get_label(\"predicted\").value else \"\u274c\"\n eval_line += (\n f' - \"{span.text}\" / {span.get_label(gold_label_type).value}'\n f' --> {span.get_label(\"predicted\").value} ({symbol})\\n'\n )\n\n lines.append(eval_line)\n return lines\n\n @classmethod\n def _init_model_with_state_dict(cls, state, **kwargs):\n # remap state dict for models serialized with Flair <= 0.11.3\n import re\n\n state_dict = state[\"state_dict\"]\n for key in list(state_dict.keys()):\n state_dict[re.sub(\"^word_embeddings\\\\.\", \"embeddings.\", key)] = state_dict.pop(key)\n\n return super()._init_model_with_state_dict(\n state,\n embeddings=state.get(\"word_embeddings\"),\n label_dictionary=state.get(\"label_dictionary\"),\n label_type=state.get(\"label_type\"),\n pooling_operation=state.get(\"pooling_operation\"),\n loss_weights=state.get(\"loss_weights\", {\"<unk>\": 0.3}),\n candidates=state.get(\"candidates\", None),\n **kwargs,\n )\n\n @property\n def label_type(self):\n return 
self._label_type\n\n def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):\n if not self.candidates:\n return scores\n\n masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)\n\n for idx, span in enumerate(data_points):\n # get the candidates\n candidate_set = self.candidates.get_candidates(span.text)\n # during training, add the gold value as candidate\n if self.training:\n candidate_set.add(span.get_label(self.label_type).value)\n candidate_set.add(\"<unk>\")\n indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]\n masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]\n\n return masked_scores\n\n @classmethod\n def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> \"EntityLinker\":\n from typing import cast\n\n return cast(\"EntityLinker\", super().load(model_path=model_path))\n", "path": "flair/models/entity_linker_model.py"}], "after_files": [{"content": "import logging\nimport re\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\nfrom unicodedata import category\n\nimport torch\n\nimport flair.embeddings\nimport flair.nn\nfrom flair.data import Dictionary, Sentence, Span\nfrom flair.file_utils import cached_path\n\nlog = logging.getLogger(\"flair\")\n\n\nclass CandidateGenerator:\n \"\"\"\n Given a string, the CandidateGenerator returns possible target classes as candidates.\n \"\"\"\n\n def __init__(self, candidates: Union[str, Dict], backoff: bool = True):\n # internal candidate lists of generator\n self.mention_to_candidates_map: Dict = {}\n\n # load Zelda candidates if so passed\n if isinstance(candidates, str) and candidates.lower() == \"zelda\":\n zelda_path: str = \"https://flair.informatik.hu-berlin.de/resources/datasets/zelda\"\n zelda_candidates = cached_path(f\"{zelda_path}/zelda_mention_entities_counter.pickle\", cache_dir=\"datasets\")\n import pickle\n\n with open(zelda_candidates, \"rb\") as handle:\n mention_entities_counter = pickle.load(handle)\n\n # create candidate lists\n candidate_lists = {}\n for mention in mention_entities_counter:\n candidate_lists[mention] = list(mention_entities_counter[mention].keys())\n\n self.mention_to_candidates_map = candidate_lists\n\n elif isinstance(candidates, Dict):\n self.mention_to_candidates_map = candidates\n\n # if lower casing is enabled, create candidate lists of lower cased versions\n self.backoff = backoff\n if self.backoff:\n # create a new dictionary for lower cased mentions\n lowercased_mention_to_candidates_map: Dict = {}\n\n # go through each mention and its candidates\n for mention, candidates in self.mention_to_candidates_map.items():\n backoff_mention = self._make_backoff_string(mention)\n # check if backoff mention already seen. If so, add candidates. 
Else, create new entry.\n if backoff_mention in lowercased_mention_to_candidates_map:\n current_candidates = lowercased_mention_to_candidates_map[backoff_mention]\n lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(candidates)\n else:\n lowercased_mention_to_candidates_map[backoff_mention] = candidates\n\n # set lowercased version as map\n self.mention_to_candidates_map = lowercased_mention_to_candidates_map\n\n @lru_cache(maxsize=50000)\n def _make_backoff_string(self, mention: str) -> str:\n backoff_mention = mention.lower()\n backoff_mention = \"\".join(ch for ch in backoff_mention if category(ch)[0] not in \"P\")\n backoff_mention = re.sub(\" +\", \" \", backoff_mention)\n return backoff_mention\n\n def get_candidates(self, mention: str) -> Set[str]:\n \"\"\"Given a mention, this method returns a set of candidate classes\"\"\"\n if self.backoff:\n mention = self._make_backoff_string(mention)\n\n return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()\n\n\nclass EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):\n \"\"\"\n Entity Linking Model\n The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.\n To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.\n The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.\n \"\"\"\n\n def __init__(\n self,\n embeddings: flair.embeddings.TokenEmbeddings,\n label_dictionary: Dictionary,\n pooling_operation: str = \"first_last\",\n label_type: str = \"nel\",\n candidates: Optional[CandidateGenerator] = None,\n **classifierargs,\n ):\n \"\"\"\n Initializes an EntityLinker\n :param embeddings: embeddings used to embed the words/sentences\n :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>\n :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.\n E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 
'first&last' concatenates\n the embedding of the first and the embedding of the last word.\n :param label_type: name of the label you use.\n \"\"\"\n\n super(EntityLinker, self).__init__(\n embeddings=embeddings,\n label_dictionary=label_dictionary,\n final_embedding_size=embeddings.embedding_length * 2\n if pooling_operation == \"first_last\"\n else embeddings.embedding_length,\n **classifierargs,\n )\n\n self.pooling_operation = pooling_operation\n self._label_type = label_type\n\n cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {\n \"average\": self.emb_mean,\n \"first\": self.emb_first,\n \"last\": self.emb_last,\n \"first_last\": self.emb_firstAndLast,\n }\n\n if pooling_operation not in cases:\n raise KeyError('pooling_operation has to be one of \"average\", \"first\", \"last\" or \"first_last\"')\n\n self.aggregated_embedding = cases[pooling_operation]\n\n self.candidates = candidates\n\n self.to(flair.device)\n\n def emb_first(self, span: Span, embedding_names):\n return span.tokens[0].get_embedding(embedding_names)\n\n def emb_last(self, span: Span, embedding_names):\n return span.tokens[-1].get_embedding(embedding_names)\n\n def emb_firstAndLast(self, span: Span, embedding_names):\n return torch.cat(\n (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0\n )\n\n def emb_mean(self, span, embedding_names):\n return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)\n\n def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:\n return sentence.get_spans(self.label_type)\n\n def _filter_data_point(self, data_point: Sentence) -> bool:\n return bool(data_point.get_labels(self.label_type))\n\n def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:\n return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())\n\n def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"word_embeddings\": self.embeddings.save_embeddings(use_state_dict=False),\n \"label_type\": self.label_type,\n \"label_dictionary\": self.label_dictionary,\n \"pooling_operation\": self.pooling_operation,\n \"loss_weights\": self.weight_dict,\n \"candidates\": self.candidates,\n }\n return model_state\n\n def _print_predictions(self, batch, gold_label_type):\n lines = []\n for datapoint in batch:\n eval_line = f\"\\n{datapoint.to_original_text()}\\n\"\n\n for span in datapoint.get_spans(gold_label_type):\n symbol = \"\u2713\" if span.get_label(gold_label_type).value == span.get_label(\"predicted\").value else \"\u274c\"\n eval_line += (\n f' - \"{span.text}\" / {span.get_label(gold_label_type).value}'\n f' --> {span.get_label(\"predicted\").value} ({symbol})\\n'\n )\n\n lines.append(eval_line)\n return lines\n\n @classmethod\n def _init_model_with_state_dict(cls, state, **kwargs):\n # remap state dict for models serialized with Flair <= 0.11.3\n import re\n\n state_dict = state[\"state_dict\"]\n for key in list(state_dict.keys()):\n state_dict[re.sub(\"^word_embeddings\\\\.\", \"embeddings.\", key)] = state_dict.pop(key)\n\n return super()._init_model_with_state_dict(\n state,\n embeddings=state.get(\"word_embeddings\"),\n label_dictionary=state.get(\"label_dictionary\"),\n label_type=state.get(\"label_type\"),\n pooling_operation=state.get(\"pooling_operation\"),\n loss_weights=state.get(\"loss_weights\", {\"<unk>\": 0.3}),\n candidates=state.get(\"candidates\", None),\n **kwargs,\n )\n\n @property\n def label_type(self):\n return 
self._label_type\n\n def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):\n if not self.candidates:\n return scores\n\n masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)\n\n for idx, span in enumerate(data_points):\n # get the candidates\n candidate_set = self.candidates.get_candidates(span.text)\n # during training, add the gold value as candidate\n if self.training:\n candidate_set.add(span.get_label(self.label_type).value)\n candidate_set.add(\"<unk>\")\n indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]\n masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]\n\n return masked_scores\n\n @classmethod\n def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> \"EntityLinker\":\n from typing import cast\n\n return cast(\"EntityLinker\", super().load(model_path=model_path))\n", "path": "flair/models/entity_linker_model.py"}]} | 3,774 | 154 |
gh_patches_debug_23023 | rasdani/github-patches | git_diff | conda__conda-9464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda raises bogus NotWritableError exception for a bogus attempt to chown to root
This isn't a dupe (well it is a dupe of #7109) but the issue still persists.
#7180 fixed the problem in one section of code, but the same problematic snippet is present here:
https://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/gateways/disk/update.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 from __future__ import absolute_import, division, print_function, unicode_literals
5
6 from errno import EINVAL, EXDEV, EPERM
7 from logging import getLogger
8 import os
9 from os.path import dirname, isdir, split, basename, join, exists
10 import re
11 from shutil import move
12 from subprocess import Popen, PIPE
13
14 from . import exp_backoff_fn, mkdir_p, mkdir_p_sudo_safe
15 from .delete import rm_rf
16 from .link import lexists
17 from ...base.context import context
18 from ...common.compat import on_win
19 from ...common.path import expand
20 from ...exceptions import NotWritableError
21
22 log = getLogger(__name__)
23
24 SHEBANG_REGEX = re.compile(br'^(#!((?:\\ |[^ \n\r])+)(.*))')
25
26
27 class CancelOperation(Exception):
28 pass
29
30
31 def update_file_in_place_as_binary(file_full_path, callback):
32 # callback should be a callable that takes one positional argument, which is the
33 # content of the file before updating
34 # this method updates the file in-place, without releasing the file lock
35 fh = None
36 try:
37 fh = exp_backoff_fn(open, file_full_path, 'rb+')
38 log.trace("in-place update path locked for %s", file_full_path)
39 data = fh.read()
40 fh.seek(0)
41 try:
42 fh.write(callback(data))
43 fh.truncate()
44 except CancelOperation:
45 pass # NOQA
46 finally:
47 if fh:
48 fh.close()
49
50
51 def rename(source_path, destination_path, force=False):
52 if lexists(destination_path) and force:
53 rm_rf(destination_path)
54 if lexists(source_path):
55 log.trace("renaming %s => %s", source_path, destination_path)
56 try:
57 os.rename(source_path, destination_path)
58 except EnvironmentError as e:
59 if (on_win and dirname(source_path) == dirname(destination_path)
60 and os.path.isfile(source_path)):
61 condabin_dir = join(context.conda_prefix, "condabin")
62 rename_script = join(condabin_dir, 'rename_tmp.bat')
63 if exists(rename_script):
64 _dirname, _src_fn = split(source_path)
65 _dest_fn = basename(destination_path)
66 p = Popen(['cmd.exe', '/C', rename_script, _dirname,
67 _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE)
68 stdout, stderr = p.communicate()
69 else:
70 log.debug("{} is missing. Conda was not installed correctly or has been "
71 "corrupted. Please file an issue on the conda github repo."
72 .format(rename_script))
73 elif e.errno in (EINVAL, EXDEV, EPERM):
74 # https://github.com/conda/conda/issues/6811
75 # https://github.com/conda/conda/issues/6711
76 log.trace("Could not rename %s => %s due to errno [%s]. Falling back"
77 " to copy/unlink", source_path, destination_path, e.errno)
78 # https://github.com/moby/moby/issues/25409#issuecomment-238537855
79 # shutil.move() falls back to copy+unlink
80 move(source_path, destination_path)
81 else:
82 raise
83 else:
84 log.trace("cannot rename; source path does not exist '%s'", source_path)
85
86
87 def backoff_rename(source_path, destination_path, force=False):
88 exp_backoff_fn(rename, source_path, destination_path, force)
89
90
91 def touch(path, mkdir=False, sudo_safe=False):
92 # sudo_safe: use any time `path` is within the user's home directory
93 # returns:
94 # True if the file did not exist but was created
95 # False if the file already existed
96 # raises: NotWritableError, which is also an OSError having attached errno
97 try:
98 path = expand(path)
99 log.trace("touching path %s", path)
100 if lexists(path):
101 os.utime(path, None)
102 return True
103 else:
104 dirpath = dirname(path)
105 if not isdir(dirpath) and mkdir:
106 if sudo_safe:
107 mkdir_p_sudo_safe(dirpath)
108 else:
109 mkdir_p(dirpath)
110 else:
111 assert isdir(dirname(path))
112 try:
113 fh = open(path, 'a')
114 except:
115 raise
116 else:
117 fh.close()
118 if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
119 uid = int(os.environ['SUDO_UID'])
120 gid = int(os.environ.get('SUDO_GID', -1))
121 log.trace("chowning %s:%s %s", uid, gid, path)
122 os.chown(path, uid, gid)
123 return False
124 except (IOError, OSError) as e:
125 raise NotWritableError(path, e.errno, caused_by=e)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/gateways/disk/update.py b/conda/gateways/disk/update.py
--- a/conda/gateways/disk/update.py
+++ b/conda/gateways/disk/update.py
@@ -109,17 +109,17 @@
mkdir_p(dirpath)
else:
assert isdir(dirname(path))
- try:
- fh = open(path, 'a')
- except:
- raise
- else:
- fh.close()
- if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
- uid = int(os.environ['SUDO_UID'])
- gid = int(os.environ.get('SUDO_GID', -1))
- log.trace("chowning %s:%s %s", uid, gid, path)
- os.chown(path, uid, gid)
- return False
+ with open(path, 'a'):
+ pass
+ # This chown call causes a false positive PermissionError to be
+ # raised (similar to #7109) when called in an environment which
+ # comes from sudo -u.
+ #
+ # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
+ # uid = int(os.environ['SUDO_UID'])
+ # gid = int(os.environ.get('SUDO_GID', -1))
+ # log.trace("chowning %s:%s %s", uid, gid, path)
+ # os.chown(path, uid, gid)
+ return False
except (IOError, OSError) as e:
raise NotWritableError(path, e.errno, caused_by=e)
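Editor's note: the accepted patch drops the `os.chown` call entirely because, when conda is invoked through `sudo -u <user>`, `SUDO_UID` is set but the process itself is not root, so chowning the freshly touched file raises `EPERM`; the surrounding `except (IOError, OSError)` clause then converts that into the misleading `NotWritableError` described in the issue. A minimal, hypothetical sketch of that failure mode (POSIX only; the file path is invented for illustration):
```python
# Sketch of the code path the patch removes; safe to run, since the chown is
# only attempted when SUDO_UID is present (e.g. under `sudo -u someuser python ...`).
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "touched-file")

with open(path, "a"):            # the patched behaviour: just create/touch the file
    pass

if os.environ.get("SUDO_UID") is not None:
    uid = int(os.environ["SUDO_UID"])
    gid = int(os.environ.get("SUDO_GID", -1))
    try:
        os.chown(path, uid, gid)      # the removed call: needs CAP_CHOWN
    except PermissionError as exc:    # EPERM -> was re-raised as NotWritableError
        print("false positive reproduced:", exc)
```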
| {"golden_diff": "diff --git a/conda/gateways/disk/update.py b/conda/gateways/disk/update.py\n--- a/conda/gateways/disk/update.py\n+++ b/conda/gateways/disk/update.py\n@@ -109,17 +109,17 @@\n mkdir_p(dirpath)\n else:\n assert isdir(dirname(path))\n- try:\n- fh = open(path, 'a')\n- except:\n- raise\n- else:\n- fh.close()\n- if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n- uid = int(os.environ['SUDO_UID'])\n- gid = int(os.environ.get('SUDO_GID', -1))\n- log.trace(\"chowning %s:%s %s\", uid, gid, path)\n- os.chown(path, uid, gid)\n- return False\n+ with open(path, 'a'):\n+ pass\n+ # This chown call causes a false positive PermissionError to be\n+ # raised (similar to #7109) when called in an environment which\n+ # comes from sudo -u.\n+ #\n+ # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n+ # uid = int(os.environ['SUDO_UID'])\n+ # gid = int(os.environ.get('SUDO_GID', -1))\n+ # log.trace(\"chowning %s:%s %s\", uid, gid, path)\n+ # os.chown(path, uid, gid)\n+ return False\n except (IOError, OSError) as e:\n raise NotWritableError(path, e.errno, caused_by=e)\n", "issue": "conda raises bogus NotWritableError exception for a bogus attempt to chown to root\nThis isn't a dupe (well it is a dupe of #7109) but the issue still persists.\r\n\r\n#7180 fixed the problem in one section of code, but the same problematic snippet is present here:\r\n\r\nhttps://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121\nconda raises bogus NotWritableError exception for a bogus attempt to chown to root\nThis isn't a dupe (well it is a dupe of #7109) but the issue still persists.\r\n\r\n#7180 fixed the problem in one section of code, but the same problematic snippet is present here:\r\n\r\nhttps://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom errno import EINVAL, EXDEV, EPERM\nfrom logging import getLogger\nimport os\nfrom os.path import dirname, isdir, split, basename, join, exists\nimport re\nfrom shutil import move\nfrom subprocess import Popen, PIPE\n\nfrom . 
import exp_backoff_fn, mkdir_p, mkdir_p_sudo_safe\nfrom .delete import rm_rf\nfrom .link import lexists\nfrom ...base.context import context\nfrom ...common.compat import on_win\nfrom ...common.path import expand\nfrom ...exceptions import NotWritableError\n\nlog = getLogger(__name__)\n\nSHEBANG_REGEX = re.compile(br'^(#!((?:\\\\ |[^ \\n\\r])+)(.*))')\n\n\nclass CancelOperation(Exception):\n pass\n\n\ndef update_file_in_place_as_binary(file_full_path, callback):\n # callback should be a callable that takes one positional argument, which is the\n # content of the file before updating\n # this method updates the file in-place, without releasing the file lock\n fh = None\n try:\n fh = exp_backoff_fn(open, file_full_path, 'rb+')\n log.trace(\"in-place update path locked for %s\", file_full_path)\n data = fh.read()\n fh.seek(0)\n try:\n fh.write(callback(data))\n fh.truncate()\n except CancelOperation:\n pass # NOQA\n finally:\n if fh:\n fh.close()\n\n\ndef rename(source_path, destination_path, force=False):\n if lexists(destination_path) and force:\n rm_rf(destination_path)\n if lexists(source_path):\n log.trace(\"renaming %s => %s\", source_path, destination_path)\n try:\n os.rename(source_path, destination_path)\n except EnvironmentError as e:\n if (on_win and dirname(source_path) == dirname(destination_path)\n and os.path.isfile(source_path)):\n condabin_dir = join(context.conda_prefix, \"condabin\")\n rename_script = join(condabin_dir, 'rename_tmp.bat')\n if exists(rename_script):\n _dirname, _src_fn = split(source_path)\n _dest_fn = basename(destination_path)\n p = Popen(['cmd.exe', '/C', rename_script, _dirname,\n _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n else:\n log.debug(\"{} is missing. Conda was not installed correctly or has been \"\n \"corrupted. Please file an issue on the conda github repo.\"\n .format(rename_script))\n elif e.errno in (EINVAL, EXDEV, EPERM):\n # https://github.com/conda/conda/issues/6811\n # https://github.com/conda/conda/issues/6711\n log.trace(\"Could not rename %s => %s due to errno [%s]. 
Falling back\"\n \" to copy/unlink\", source_path, destination_path, e.errno)\n # https://github.com/moby/moby/issues/25409#issuecomment-238537855\n # shutil.move() falls back to copy+unlink\n move(source_path, destination_path)\n else:\n raise\n else:\n log.trace(\"cannot rename; source path does not exist '%s'\", source_path)\n\n\ndef backoff_rename(source_path, destination_path, force=False):\n exp_backoff_fn(rename, source_path, destination_path, force)\n\n\ndef touch(path, mkdir=False, sudo_safe=False):\n # sudo_safe: use any time `path` is within the user's home directory\n # returns:\n # True if the file did not exist but was created\n # False if the file already existed\n # raises: NotWritableError, which is also an OSError having attached errno\n try:\n path = expand(path)\n log.trace(\"touching path %s\", path)\n if lexists(path):\n os.utime(path, None)\n return True\n else:\n dirpath = dirname(path)\n if not isdir(dirpath) and mkdir:\n if sudo_safe:\n mkdir_p_sudo_safe(dirpath)\n else:\n mkdir_p(dirpath)\n else:\n assert isdir(dirname(path))\n try:\n fh = open(path, 'a')\n except:\n raise\n else:\n fh.close()\n if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n uid = int(os.environ['SUDO_UID'])\n gid = int(os.environ.get('SUDO_GID', -1))\n log.trace(\"chowning %s:%s %s\", uid, gid, path)\n os.chown(path, uid, gid)\n return False\n except (IOError, OSError) as e:\n raise NotWritableError(path, e.errno, caused_by=e)\n", "path": "conda/gateways/disk/update.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom errno import EINVAL, EXDEV, EPERM\nfrom logging import getLogger\nimport os\nfrom os.path import dirname, isdir, split, basename, join, exists\nimport re\nfrom shutil import move\nfrom subprocess import Popen, PIPE\n\nfrom . 
import exp_backoff_fn, mkdir_p, mkdir_p_sudo_safe\nfrom .delete import rm_rf\nfrom .link import lexists\nfrom ...base.context import context\nfrom ...common.compat import on_win\nfrom ...common.path import expand\nfrom ...exceptions import NotWritableError\n\nlog = getLogger(__name__)\n\nSHEBANG_REGEX = re.compile(br'^(#!((?:\\\\ |[^ \\n\\r])+)(.*))')\n\n\nclass CancelOperation(Exception):\n pass\n\n\ndef update_file_in_place_as_binary(file_full_path, callback):\n # callback should be a callable that takes one positional argument, which is the\n # content of the file before updating\n # this method updates the file in-place, without releasing the file lock\n fh = None\n try:\n fh = exp_backoff_fn(open, file_full_path, 'rb+')\n log.trace(\"in-place update path locked for %s\", file_full_path)\n data = fh.read()\n fh.seek(0)\n try:\n fh.write(callback(data))\n fh.truncate()\n except CancelOperation:\n pass # NOQA\n finally:\n if fh:\n fh.close()\n\n\ndef rename(source_path, destination_path, force=False):\n if lexists(destination_path) and force:\n rm_rf(destination_path)\n if lexists(source_path):\n log.trace(\"renaming %s => %s\", source_path, destination_path)\n try:\n os.rename(source_path, destination_path)\n except EnvironmentError as e:\n if (on_win and dirname(source_path) == dirname(destination_path)\n and os.path.isfile(source_path)):\n condabin_dir = join(context.conda_prefix, \"condabin\")\n rename_script = join(condabin_dir, 'rename_tmp.bat')\n if exists(rename_script):\n _dirname, _src_fn = split(source_path)\n _dest_fn = basename(destination_path)\n p = Popen(['cmd.exe', '/C', rename_script, _dirname,\n _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n else:\n log.debug(\"{} is missing. Conda was not installed correctly or has been \"\n \"corrupted. Please file an issue on the conda github repo.\"\n .format(rename_script))\n elif e.errno in (EINVAL, EXDEV, EPERM):\n # https://github.com/conda/conda/issues/6811\n # https://github.com/conda/conda/issues/6711\n log.trace(\"Could not rename %s => %s due to errno [%s]. 
Falling back\"\n \" to copy/unlink\", source_path, destination_path, e.errno)\n # https://github.com/moby/moby/issues/25409#issuecomment-238537855\n # shutil.move() falls back to copy+unlink\n move(source_path, destination_path)\n else:\n raise\n else:\n log.trace(\"cannot rename; source path does not exist '%s'\", source_path)\n\n\ndef backoff_rename(source_path, destination_path, force=False):\n exp_backoff_fn(rename, source_path, destination_path, force)\n\n\ndef touch(path, mkdir=False, sudo_safe=False):\n # sudo_safe: use any time `path` is within the user's home directory\n # returns:\n # True if the file did not exist but was created\n # False if the file already existed\n # raises: NotWritableError, which is also an OSError having attached errno\n try:\n path = expand(path)\n log.trace(\"touching path %s\", path)\n if lexists(path):\n os.utime(path, None)\n return True\n else:\n dirpath = dirname(path)\n if not isdir(dirpath) and mkdir:\n if sudo_safe:\n mkdir_p_sudo_safe(dirpath)\n else:\n mkdir_p(dirpath)\n else:\n assert isdir(dirname(path))\n with open(path, 'a'):\n pass\n # This chown call causes a false positive PermissionError to be\n # raised (similar to #7109) when called in an environment which\n # comes from sudo -u.\n #\n # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n # uid = int(os.environ['SUDO_UID'])\n # gid = int(os.environ.get('SUDO_GID', -1))\n # log.trace(\"chowning %s:%s %s\", uid, gid, path)\n # os.chown(path, uid, gid)\n return False\n except (IOError, OSError) as e:\n raise NotWritableError(path, e.errno, caused_by=e)\n", "path": "conda/gateways/disk/update.py"}]} | 1,809 | 383 |
gh_patches_debug_21705 | rasdani/github-patches | git_diff | crytic__slither-373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
slither-flat does not handle cycle
Example:
```
$ echo a.sol
import './b.sol';
contract A{
B b;
}
$ echo b.sol
import './a.sol';
contract B{
A a;
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/tools/flattening/flattening.py`
Content:
```
1 from pathlib import Path
2 import re
3 import logging
4 from slither.exceptions import SlitherException
5 from slither.core.solidity_types.user_defined_type import UserDefinedType
6 from slither.core.declarations.structure import Structure
7 from slither.core.declarations.enum import Enum
8 from slither.core.declarations.contract import Contract
9 from slither.slithir.operations import NewContract, TypeConversion
10
11 logger = logging.getLogger("Slither-flattening")
12
13 class Flattening:
14
15 DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')
16
17 def __init__(self, slither, external_to_public=False):
18 self._source_codes = {}
19 self._slither = slither
20 self._external_to_public = external_to_public
21 self._use_abi_encoder_v2 = False
22
23 self._check_abi_encoder_v2()
24
25 for contract in slither.contracts:
26 self._get_source_code(contract)
27
28 def _check_abi_encoder_v2(self):
29 for p in self._slither.pragma_directives:
30 if 'ABIEncoderV2' in str(p.directive):
31 self._use_abi_encoder_v2 = True
32 return
33
34 def _get_source_code(self, contract):
35 src_mapping = contract.source_mapping
36 content = self._slither.source_code[src_mapping['filename_absolute']]
37 start = src_mapping['start']
38 end = src_mapping['start'] + src_mapping['length']
39
40 # interface must use external
41 if self._external_to_public and contract.contract_kind != "interface":
42 # to_patch is a list of (index, bool). The bool indicates
43 # if the index is for external -> public (true)
44 # or a calldata -> memory (false)
45 to_patch = []
46 for f in contract.functions_declared:
47 # fallback must be external
48 if f.is_fallback or f.is_constructor_variables:
49 continue
50 if f.visibility == 'external':
51 attributes_start = (f.parameters_src.source_mapping['start'] +
52 f.parameters_src.source_mapping['length'])
53 attributes_end = f.returns_src.source_mapping['start']
54 attributes = content[attributes_start:attributes_end]
55 regex = re.search(r'((\sexternal)\s+)|(\sexternal)$|(\)external)$', attributes)
56 if regex:
57 to_patch.append((attributes_start + regex.span()[0] + 1, True))
58 else:
59 raise SlitherException(f'External keyword not found {f.name} {attributes}')
60
61 for var in f.parameters:
62 if var.location == "calldata":
63 calldata_start = var.source_mapping['start']
64 calldata_end = calldata_start + var.source_mapping['length']
65 calldata_idx = content[calldata_start:calldata_end].find(' calldata ')
66 to_patch.append((calldata_start + calldata_idx + 1, False))
67
68 to_patch.sort(key=lambda x:x[0], reverse=True)
69
70 content = content[start:end]
71 for (index, is_external) in to_patch:
72 index = index - start
73 if is_external:
74 content = content[:index] + 'public' + content[index + len('external'):]
75 else:
76 content = content[:index] + 'memory' + content[index + len('calldata'):]
77 else:
78 content = content[start:end]
79
80 self._source_codes[contract] = content
81
82
83 def _export_from_type(self, t, contract, exported, list_contract):
84 if isinstance(t, UserDefinedType):
85 if isinstance(t.type, (Enum, Structure)):
86 if t.type.contract != contract and not t.type.contract in exported:
87 self._export_contract(t.type.contract, exported, list_contract)
88 else:
89 assert isinstance(t.type, Contract)
90 if t.type != contract and not t.type in exported:
91 self._export_contract(t.type, exported, list_contract)
92
93 def _export_contract(self, contract, exported, list_contract):
94 if contract.name in exported:
95 return
96 for inherited in contract.inheritance:
97 self._export_contract(inherited, exported, list_contract)
98
99 # Find all the external contracts called
100 externals = contract.all_library_calls + contract.all_high_level_calls
101 # externals is a list of (contract, function)
102 # We also filter call to itself to avoid infilite loop
103 externals = list(set([e[0] for e in externals if e[0] != contract]))
104
105 for inherited in externals:
106 self._export_contract(inherited, exported, list_contract)
107
108 # Find all the external contracts use as a base type
109 local_vars = []
110 for f in contract.functions_declared:
111 local_vars += f.variables
112
113 for v in contract.variables + local_vars:
114 self._export_from_type(v.type, contract, exported, list_contract)
115
116 # Find all convert and "new" operation that can lead to use an external contract
117 for f in contract.functions_declared:
118 for ir in f.slithir_operations:
119 if isinstance(ir, NewContract):
120 if ir.contract_created != contract and not ir.contract_created in exported:
121 self._export_contract(ir.contract_created, exported, list_contract)
122 if isinstance(ir, TypeConversion):
123 self._export_from_type(ir.type, contract, exported, list_contract)
124 if contract.name in exported:
125 return
126 exported.add(contract.name)
127 list_contract.append(self._source_codes[contract])
128
129 def _export(self, contract, ret):
130 self._export_contract(contract, set(), ret)
131 path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')
132 logger.info(f'Export {path}')
133 with open(path, 'w') as f:
134 if self._slither.solc_version:
135 f.write(f'pragma solidity {self._slither.solc_version};\n')
136 if self._use_abi_encoder_v2:
137 f.write('pragma experimental ABIEncoderV2;\n')
138 f.write('\n'.join(ret))
139 f.write('\n')
140
141 def export(self, target=None):
142
143 if not self.DEFAULT_EXPORT_PATH.exists():
144 self.DEFAULT_EXPORT_PATH.mkdir(parents=True)
145
146 if target is None:
147 for contract in self._slither.contracts_derived:
148 ret = []
149 self._export(contract, ret)
150 else:
151 contract = self._slither.get_contract_from_name(target)
152 if contract is None:
153 logger.error(f'{target} not found')
154 else:
155 ret = []
156 self._export(contract, ret)
157
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py
--- a/slither/tools/flattening/flattening.py
+++ b/slither/tools/flattening/flattening.py
@@ -93,6 +93,7 @@
def _export_contract(self, contract, exported, list_contract):
if contract.name in exported:
return
+ exported.add(contract.name)
for inherited in contract.inheritance:
self._export_contract(inherited, exported, list_contract)
@@ -121,9 +122,6 @@
self._export_contract(ir.contract_created, exported, list_contract)
if isinstance(ir, TypeConversion):
self._export_from_type(ir.type, contract, exported, list_contract)
- if contract.name in exported:
- return
- exported.add(contract.name)
list_contract.append(self._source_codes[contract])
def _export(self, contract, ret):
| {"golden_diff": "diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py\n--- a/slither/tools/flattening/flattening.py\n+++ b/slither/tools/flattening/flattening.py\n@@ -93,6 +93,7 @@\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n+ exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n \n@@ -121,9 +122,6 @@\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n- if contract.name in exported:\n- return\n- exported.add(contract.name)\n list_contract.append(self._source_codes[contract])\n \n def _export(self, contract, ret):\n", "issue": "slither-flat does not handle cycle \nExample:\r\n```\r\n$ echo a.sol\r\nimport './b.sol';\r\n\r\ncontract A{\r\n\r\n B b;\r\n\r\n}\r\n$ echo b.sol\r\nimport './a.sol';\r\n\r\ncontract B{\r\n\r\n A a;\r\n\r\n}\r\n```\nslither-flat does not handle cycle \nExample:\r\n```\r\n$ echo a.sol\r\nimport './b.sol';\r\n\r\ncontract A{\r\n\r\n B b;\r\n\r\n}\r\n$ echo b.sol\r\nimport './a.sol';\r\n\r\ncontract B{\r\n\r\n A a;\r\n\r\n}\r\n```\n", "before_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\nclass Flattening:\n\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._use_abi_encoder_v2 = False\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']]\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n # to_patch is a list of (index, bool). 
The bool indicates\n # if the index is for external -> public (true)\n # or a calldata -> memory (false)\n to_patch = []\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append((attributes_start + regex.span()[0] + 1, True))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append((calldata_start + calldata_idx + 1, False))\n\n to_patch.sort(key=lambda x:x[0], reverse=True)\n\n content = content[start:end]\n for (index, is_external) in to_patch:\n index = index - start\n if is_external:\n content = content[:index] + 'public' + content[index + len('external'):]\n else:\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n content = content[start:end]\n\n self._source_codes[contract] = content\n\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and not t.type.contract in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and not t.type in exported:\n self._export_contract(t.type, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n if contract.name in exported:\n return\n exported.add(contract.name)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n 
if self._slither.solc_version:\n f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n\n", "path": "slither/tools/flattening/flattening.py"}], "after_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\nclass Flattening:\n\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._use_abi_encoder_v2 = False\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']]\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n # to_patch is a list of (index, bool). 
The bool indicates\n # if the index is for external -> public (true)\n # or a calldata -> memory (false)\n to_patch = []\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append((attributes_start + regex.span()[0] + 1, True))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append((calldata_start + calldata_idx + 1, False))\n\n to_patch.sort(key=lambda x:x[0], reverse=True)\n\n content = content[start:end]\n for (index, is_external) in to_patch:\n index = index - start\n if is_external:\n content = content[:index] + 'public' + content[index + len('external'):]\n else:\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n content = content[start:end]\n\n self._source_codes[contract] = content\n\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and not t.type.contract in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and not t.type in exported:\n self._export_contract(t.type, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n if self._slither.solc_version:\n 
f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n\n", "path": "slither/tools/flattening/flattening.py"}]} | 2,132 | 210 |
gh_patches_debug_10926 | rasdani/github-patches | git_diff | qutip__qutip-2398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
qutip.propagator and array with a single value
Hi,
I may have missed something but from the docstrings, I would expect the last two lines of this example script to actually return the same thing:
```python
>>> import numpy as np
>>> import qutip
>>> omega_0 = 1.0 * 2 * np.pi
>>> Omega = 0.1 * 2 * np.pi
>>> omega_d = 1.5 * 2 * np.pi
>>> T = 2 * np.pi / omega_d
>>> H0 = omega_0 / 2.0 * qutip.sigmaz()
>>> H1 = Omega * qutip.sigmax()
>>> H2 = Omega * qutip.sigmay()
>>> H = [H0, [H1, 'cos(w_d * t)'], [H2, 'sin(w_d * t)']]
>>> args = {'w_d': omega_d}
>>> (qutip.propagator(H, t, c_op_list=[], args=args) - qutip.propagator(H, [t], c_op_list=[], args=args)).norm()
0.04271636357907908
>>> (qutip.propagator(H, t, c_op_list=[], args=args) - qutip.propagator(H, [0, t], c_op_list=[], args=args)).norm()
0.0
```
That is, I would expect `qutip.propagator(H, t, c_op_list=[], args=args)` to give the same result as `qutip.propagator(H, [t], c_op_list=[], args=args)` (single `t` value or array of times with a single `t` element). This does not seem to be the case here, with a non-negligible difference.
Is this an expected behavior (which would probably deserve more emphasis in the doc)?
Thanks!
Best,
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutip/solver/propagator.py`
Content:
```
1 __all__ = ['Propagator', 'propagator', 'propagator_steadystate']
2
3 import numbers
4 import numpy as np
5
6 from .. import Qobj, qeye, qeye_like, unstack_columns, QobjEvo, liouvillian
7 from ..core import data as _data
8 from .mesolve import mesolve, MESolver
9 from .sesolve import sesolve, SESolver
10 from .heom.bofin_solvers import HEOMSolver
11 from .solver_base import Solver
12 from .multitraj import MultiTrajSolver
13
14
15 def propagator(H, t, c_ops=(), args=None, options=None, **kwargs):
16 r"""
17 Calculate the propagator U(t) for the density matrix or wave function such
18 that :math:`\psi(t) = U(t)\psi(0)` or
19 :math:`\rho_{\mathrm vec}(t) = U(t) \rho_{\mathrm vec}(0)`
20 where :math:`\rho_{\mathrm vec}` is the vector representation of the
21 density matrix.
22
23 Parameters
24 ----------
25 H : :obj:`.Qobj`, :obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format
26 Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or
27 QobjEvo. ``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`] or callable
28 that can be made into :obj:`.QobjEvo` are also accepted.
29
30 t : float or array-like
31 Time or list of times for which to evaluate the propagator.
32
33 c_ops : list, optional
34 List of Qobj or QobjEvo collapse operators.
35
36 args : dictionary, optional
37 Parameters to callback functions for time-dependent Hamiltonians and
38 collapse operators.
39
40 options : dict, optional
41 Options for the solver.
42
43 **kwargs :
44 Extra parameters to use when creating the
45 :obj:`.QobjEvo` from a list format ``H``.
46
47 Returns
48 -------
49 U : :obj:`.Qobj`, list
50 Instance representing the propagator(s) :math:`U(t)`. Return a single
51 Qobj when ``t`` is a number or a list when ``t`` is a list.
52
53 """
54 if isinstance(t, numbers.Real):
55 tlist = [0, t]
56 list_output = False
57 else:
58 tlist = t
59 list_output = True
60
61 if not isinstance(H, (Qobj, QobjEvo)):
62 H = QobjEvo(H, args=args, **kwargs)
63
64 if c_ops:
65 H = liouvillian(H, c_ops)
66
67 U0 = qeye_like(H)
68
69 if H.issuper:
70 out = mesolve(H, U0, tlist, args=args, options=options).states
71 else:
72 out = sesolve(H, U0, tlist, args=args, options=options).states
73
74 if list_output:
75 return out
76 else:
77 return out[-1]
78
79
80 def propagator_steadystate(U):
81 r"""Find the steady state for successive applications of the propagator
82 :math:`U`.
83
84 Parameters
85 ----------
86 U : :obj:`.Qobj`
87 Operator representing the propagator.
88
89 Returns
90 -------
91 a : :obj:`.Qobj`
92 Instance representing the steady-state density matrix.
93 """
94 evals, estates = U.eigenstates()
95 shifted_vals = np.abs(evals - 1.0)
96 ev_idx = np.argmin(shifted_vals)
97 rho_data = unstack_columns(estates[ev_idx].data)
98 rho_data = _data.mul(rho_data, 0.5 / _data.trace(rho_data))
99 return Qobj(_data.add(rho_data, _data.adjoint(rho_data)),
100 dims=U.dims[0],
101 isherm=True,
102 copy=False)
103
104
105 class Propagator:
106 """
107 A generator of propagator for a system.
108
109 Usage:
110
111 U = Propagator(H, c_ops)
112
113 psi_t = U(t) @ psi_0
114
115 Save some previously computed propagator are stored to speed up subsequent
116 computation. Changing ``args`` will erase these stored probagator.
117
118 Parameters
119 ----------
120 system : :obj:`.Qobj`, :obj:`.QobjEvo`, :class:`.Solver`
121 Possibly time-dependent system driving the evolution, either already
122 packaged in a solver, such as :class:`.SESolver` or :class:`.BRSolver`,
123 or the Liouvillian or Hamiltonian as a :obj:`.Qobj`,
124 :obj:`.QobjEvo`. ``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`]
125 or callable that can be made into :obj:`.QobjEvo` are also accepted.
126
127 Solvers that run non-deterministacilly, such as :class:`.MCSolver`, are
128 not supported.
129
130 c_ops : list, optional
131 List of :obj:`.Qobj` or :obj:`.QobjEvo` collapse operators.
132
133 args : dictionary, optional
134 Parameters to callback functions for time-dependent Hamiltonians and
135 collapse operators.
136
137 options : dict, optional
138 Options for the solver.
139
140 memoize : int, default: 10
141 Max number of propagator to save.
142
143 tol : float, default: 1e-14
144 Absolute tolerance for the time. If a previous propagator was computed
145 at a time within tolerance, that propagator will be returned.
146
147 Notes
148 -----
149 The :class:`Propagator` is not a :obj:`.QobjEvo` so
150 it cannot be used for operations with :obj:`.Qobj` or
151 :obj:`.QobjEvo`. It can be made into a
152 :obj:`.QobjEvo` with ::
153
154 U = QobjEvo(Propagator(H))
155
156 """
157 def __init__(self, system, *, c_ops=(), args=None, options=None,
158 memoize=10, tol=1e-14):
159 if isinstance(system, MultiTrajSolver):
160 raise TypeError("Non-deterministic solvers cannot be used "
161 "as a propagator system")
162 elif isinstance(system, HEOMSolver):
163 raise NotImplementedError(
164 "HEOM is not supported by Propagator. "
165 "Please, tell us on GitHub issues if you need it!"
166 )
167 elif isinstance(system, Solver):
168 self.solver = system
169 else:
170 Hevo = QobjEvo(system, args=args)
171 c_ops = [QobjEvo(op, args=args) for op in c_ops]
172 if Hevo.issuper or c_ops:
173 self.solver = MESolver(Hevo, c_ops=c_ops, options=options)
174 else:
175 self.solver = SESolver(Hevo, options=options)
176
177 self.times = [0]
178 self.invs = [None]
179 self.props = [qeye(self.solver.sys_dims)]
180 self.solver.start(self.props[0], self.times[0])
181 self.cte = self.solver.rhs.isconstant
182 H_0 = self.solver.rhs(0)
183 self.unitary = not H_0.issuper and H_0.isherm
184 self.args = args
185 self.memoize = max(3, int(memoize))
186 self.tol = tol
187
188 def _lookup_or_compute(self, t):
189 """
190 Get U(t) from cache or compute it.
191 """
192 idx = np.searchsorted(self.times, t)
193 if idx < len(self.times) and abs(t-self.times[idx]) <= self.tol:
194 U = self.props[idx]
195 elif idx > 0 and abs(t-self.times[idx-1]) <= self.tol:
196 U = self.props[idx-1]
197 else:
198 U = self._compute(t, idx)
199 self._insert(t, U, idx)
200 return U
201
202 def __call__(self, t, t_start=0, **args):
203 """
204 Get the propagator from ``t_start`` to ``t``.
205
206 Parameters
207 ----------
208 t : float
209 Time at which to compute the propagator.
210 t_start: float [0]
211 Time at which the propagator start such that:
212 ``psi[t] = U.prop(t, t_start) @ psi[t_start]``
213 args : dict
214 Argument to pass to a time dependent Hamiltonian.
215 Updating ``args`` take effect since ``t=0`` and the new ``args``
216 will be used in future call.
217 """
218 # We could improve it when the system is constant using U(2t) = U(t)**2
219 if not self.cte and args and args != self.args:
220 self.args = args
221 self.solver._argument(args)
222 self.times = [0]
223 self.props = [qeye_like(self.props[0])]
224 self.solver.start(self.props[0], self.times[0])
225
226 if t_start:
227 if t == t_start:
228 U = self._lookup_or_compute(0)
229 if self.cte:
230 U = self._lookup_or_compute(t - t_start)
231 else:
232 Uinv = self._inv(self._lookup_or_compute(t_start))
233 U = self._lookup_or_compute(t) @ Uinv
234 else:
235 U = self._lookup_or_compute(t)
236 return U
237
238 def inv(self, t, **args):
239 """
240 Get the inverse of the propagator at ``t``, such that
241 ``psi_0 = U.inv(t) @ psi_t``
242
243 Parameters
244 ----------
245 t : float
246 Time at which to compute the propagator.
247 args : dict
248 Argument to pass to a time dependent Hamiltonian.
249 Updating ``args`` take effect since ``t=0`` and the new ``args``
250 will be used in future call.
251 """
252 return self._inv(self(t, **args))
253
254 def _compute(self, t, idx):
255 """
256 Compute the propagator at ``t``, ``idx`` point to a pair of
257 (time, propagator) close to the desired time.
258 """
259 t_last = self.solver._integrator.get_state(copy=False)[0]
260 if self.times[idx-1] <= t_last <= t:
261 U = self.solver.step(t)
262 elif idx > 0:
263 self.solver.start(self.props[idx-1], self.times[idx-1])
264 U = self.solver.step(t)
265 else:
266 # Evolving backward in time is not supported by all integrator.
267 self.solver.start(qeye_like(self.props[0]), t)
268 Uinv = self.solver.step(self.times[idx])
269 U = self._inv(Uinv)
270 return U
271
272 def _inv(self, U):
273 return U.dag() if self.unitary else U.inv()
274
275 def _insert(self, t, U, idx):
276 """
277 Insert a new pair of (time, propagator) to the memorized states.
278 """
279 while len(self.times) >= self.memoize:
280 rm_idx = self.memoize // 2
281 if self.times[rm_idx] < t:
282 idx -= 1
283 del self.times[rm_idx]
284 del self.props[rm_idx]
285 self.times.insert(idx, t)
286 self.props.insert(idx, U)
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutip/solver/propagator.py b/qutip/solver/propagator.py
--- a/qutip/solver/propagator.py
+++ b/qutip/solver/propagator.py
@@ -28,7 +28,11 @@
that can be made into :obj:`.QobjEvo` are also accepted.
t : float or array-like
- Time or list of times for which to evaluate the propagator.
+ Time or list of times for which to evaluate the propagator. If a single
+ time ``t`` is passed, the propagator from ``0`` to ``t`` is computed.
+ When ``t`` is a list, the propagators from the first time in the list
+ to each elements in ``t`` is returned. In that case, the first output
+ will always be the identity matrix.
c_ops : list, optional
List of Qobj or QobjEvo collapse operators.
| {"golden_diff": "diff --git a/qutip/solver/propagator.py b/qutip/solver/propagator.py\n--- a/qutip/solver/propagator.py\n+++ b/qutip/solver/propagator.py\n@@ -28,7 +28,11 @@\n that can be made into :obj:`.QobjEvo` are also accepted.\n \n t : float or array-like\n- Time or list of times for which to evaluate the propagator.\n+ Time or list of times for which to evaluate the propagator. If a single\n+ time ``t`` is passed, the propagator from ``0`` to ``t`` is computed.\n+ When ``t`` is a list, the propagators from the first time in the list\n+ to each elements in ``t`` is returned. In that case, the first output\n+ will always be the identity matrix.\n \n c_ops : list, optional\n List of Qobj or QobjEvo collapse operators.\n", "issue": "qutip.propagator and array with a single value\nHi,\r\n\r\nI may have missed something but from the docstrings, I would expect the last two lines of this example script to actually return the same thing:\r\n\r\n```python\r\n>>> import numpy as np\r\n>>> import qutip\r\n>>> omega_0 = 1.0 * 2 * np.pi\r\n>>> Omega = 0.1 * 2 * np.pi\r\n>>> omega_d = 1.5 * 2 * np.pi\r\n>>> T = 2 * np.pi / omega_d\r\n\r\n>>> H0 = omega_0 / 2.0 * qutip.sigmaz()\r\n>>> H1 = Omega * qutip.sigmax()\r\n>>> H2 = Omega * qutip.sigmay()\r\n\r\n>>> H = [H0, [H1, 'cos(w_d * t)'], [H2, 'sin(w_d * t)']]\r\n>>> args = {'w_d': omega_d}\r\n\r\n>>> (qutip.propagator(H, t, c_op_list=[], args=args) - qutip.propagator(H, [t], c_op_list=[], args=args)).norm()\r\n0.04271636357907908\r\n\r\n>>> (qutip.propagator(H, t, c_op_list=[], args=args) - qutip.propagator(H, [0, t], c_op_list=[], args=args)).norm()\r\n0.0\r\n```\r\n\r\nThat is, I would expect `qutip.propagator(H, t, c_op_list=[], args=args)` to give the same result as `qutip.propagator(H, [t], c_op_list=[], args=args)` (single `t` value or array of times with a single `t` element). This does not seem to be the case here, with a not negligible difference.\r\n\r\nIs this an expected behavior (which would probably deserve more emphasis in the doc)?\r\n\r\nThanks!\r\nBest,\n", "before_files": [{"content": "__all__ = ['Propagator', 'propagator', 'propagator_steadystate']\n\nimport numbers\nimport numpy as np\n\nfrom .. import Qobj, qeye, qeye_like, unstack_columns, QobjEvo, liouvillian\nfrom ..core import data as _data\nfrom .mesolve import mesolve, MESolver\nfrom .sesolve import sesolve, SESolver\nfrom .heom.bofin_solvers import HEOMSolver\nfrom .solver_base import Solver\nfrom .multitraj import MultiTrajSolver\n\n\ndef propagator(H, t, c_ops=(), args=None, options=None, **kwargs):\n r\"\"\"\n Calculate the propagator U(t) for the density matrix or wave function such\n that :math:`\\psi(t) = U(t)\\psi(0)` or\n :math:`\\rho_{\\mathrm vec}(t) = U(t) \\rho_{\\mathrm vec}(0)`\n where :math:`\\rho_{\\mathrm vec}` is the vector representation of the\n density matrix.\n\n Parameters\n ----------\n H : :obj:`.Qobj`, :obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format\n Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or\n QobjEvo. 
``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`] or callable\n that can be made into :obj:`.QobjEvo` are also accepted.\n\n t : float or array-like\n Time or list of times for which to evaluate the propagator.\n\n c_ops : list, optional\n List of Qobj or QobjEvo collapse operators.\n\n args : dictionary, optional\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : dict, optional\n Options for the solver.\n\n **kwargs :\n Extra parameters to use when creating the\n :obj:`.QobjEvo` from a list format ``H``.\n\n Returns\n -------\n U : :obj:`.Qobj`, list\n Instance representing the propagator(s) :math:`U(t)`. Return a single\n Qobj when ``t`` is a number or a list when ``t`` is a list.\n\n \"\"\"\n if isinstance(t, numbers.Real):\n tlist = [0, t]\n list_output = False\n else:\n tlist = t\n list_output = True\n\n if not isinstance(H, (Qobj, QobjEvo)):\n H = QobjEvo(H, args=args, **kwargs)\n\n if c_ops:\n H = liouvillian(H, c_ops)\n\n U0 = qeye_like(H)\n\n if H.issuper:\n out = mesolve(H, U0, tlist, args=args, options=options).states\n else:\n out = sesolve(H, U0, tlist, args=args, options=options).states\n\n if list_output:\n return out\n else:\n return out[-1]\n\n\ndef propagator_steadystate(U):\n r\"\"\"Find the steady state for successive applications of the propagator\n :math:`U`.\n\n Parameters\n ----------\n U : :obj:`.Qobj`\n Operator representing the propagator.\n\n Returns\n -------\n a : :obj:`.Qobj`\n Instance representing the steady-state density matrix.\n \"\"\"\n evals, estates = U.eigenstates()\n shifted_vals = np.abs(evals - 1.0)\n ev_idx = np.argmin(shifted_vals)\n rho_data = unstack_columns(estates[ev_idx].data)\n rho_data = _data.mul(rho_data, 0.5 / _data.trace(rho_data))\n return Qobj(_data.add(rho_data, _data.adjoint(rho_data)),\n dims=U.dims[0],\n isherm=True,\n copy=False)\n\n\nclass Propagator:\n \"\"\"\n A generator of propagator for a system.\n\n Usage:\n\n U = Propagator(H, c_ops)\n\n psi_t = U(t) @ psi_0\n\n Save some previously computed propagator are stored to speed up subsequent\n computation. Changing ``args`` will erase these stored probagator.\n\n Parameters\n ----------\n system : :obj:`.Qobj`, :obj:`.QobjEvo`, :class:`.Solver`\n Possibly time-dependent system driving the evolution, either already\n packaged in a solver, such as :class:`.SESolver` or :class:`.BRSolver`,\n or the Liouvillian or Hamiltonian as a :obj:`.Qobj`,\n :obj:`.QobjEvo`. ``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`]\n or callable that can be made into :obj:`.QobjEvo` are also accepted.\n\n Solvers that run non-deterministacilly, such as :class:`.MCSolver`, are\n not supported.\n\n c_ops : list, optional\n List of :obj:`.Qobj` or :obj:`.QobjEvo` collapse operators.\n\n args : dictionary, optional\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : dict, optional\n Options for the solver.\n\n memoize : int, default: 10\n Max number of propagator to save.\n\n tol : float, default: 1e-14\n Absolute tolerance for the time. If a previous propagator was computed\n at a time within tolerance, that propagator will be returned.\n\n Notes\n -----\n The :class:`Propagator` is not a :obj:`.QobjEvo` so\n it cannot be used for operations with :obj:`.Qobj` or\n :obj:`.QobjEvo`. 
It can be made into a\n :obj:`.QobjEvo` with ::\n\n U = QobjEvo(Propagator(H))\n\n \"\"\"\n def __init__(self, system, *, c_ops=(), args=None, options=None,\n memoize=10, tol=1e-14):\n if isinstance(system, MultiTrajSolver):\n raise TypeError(\"Non-deterministic solvers cannot be used \"\n \"as a propagator system\")\n elif isinstance(system, HEOMSolver):\n raise NotImplementedError(\n \"HEOM is not supported by Propagator. \"\n \"Please, tell us on GitHub issues if you need it!\"\n )\n elif isinstance(system, Solver):\n self.solver = system\n else:\n Hevo = QobjEvo(system, args=args)\n c_ops = [QobjEvo(op, args=args) for op in c_ops]\n if Hevo.issuper or c_ops:\n self.solver = MESolver(Hevo, c_ops=c_ops, options=options)\n else:\n self.solver = SESolver(Hevo, options=options)\n\n self.times = [0]\n self.invs = [None]\n self.props = [qeye(self.solver.sys_dims)]\n self.solver.start(self.props[0], self.times[0])\n self.cte = self.solver.rhs.isconstant\n H_0 = self.solver.rhs(0)\n self.unitary = not H_0.issuper and H_0.isherm\n self.args = args\n self.memoize = max(3, int(memoize))\n self.tol = tol\n\n def _lookup_or_compute(self, t):\n \"\"\"\n Get U(t) from cache or compute it.\n \"\"\"\n idx = np.searchsorted(self.times, t)\n if idx < len(self.times) and abs(t-self.times[idx]) <= self.tol:\n U = self.props[idx]\n elif idx > 0 and abs(t-self.times[idx-1]) <= self.tol:\n U = self.props[idx-1]\n else:\n U = self._compute(t, idx)\n self._insert(t, U, idx)\n return U\n\n def __call__(self, t, t_start=0, **args):\n \"\"\"\n Get the propagator from ``t_start`` to ``t``.\n\n Parameters\n ----------\n t : float\n Time at which to compute the propagator.\n t_start: float [0]\n Time at which the propagator start such that:\n ``psi[t] = U.prop(t, t_start) @ psi[t_start]``\n args : dict\n Argument to pass to a time dependent Hamiltonian.\n Updating ``args`` take effect since ``t=0`` and the new ``args``\n will be used in future call.\n \"\"\"\n # We could improve it when the system is constant using U(2t) = U(t)**2\n if not self.cte and args and args != self.args:\n self.args = args\n self.solver._argument(args)\n self.times = [0]\n self.props = [qeye_like(self.props[0])]\n self.solver.start(self.props[0], self.times[0])\n\n if t_start:\n if t == t_start:\n U = self._lookup_or_compute(0)\n if self.cte:\n U = self._lookup_or_compute(t - t_start)\n else:\n Uinv = self._inv(self._lookup_or_compute(t_start))\n U = self._lookup_or_compute(t) @ Uinv\n else:\n U = self._lookup_or_compute(t)\n return U\n\n def inv(self, t, **args):\n \"\"\"\n Get the inverse of the propagator at ``t``, such that\n ``psi_0 = U.inv(t) @ psi_t``\n\n Parameters\n ----------\n t : float\n Time at which to compute the propagator.\n args : dict\n Argument to pass to a time dependent Hamiltonian.\n Updating ``args`` take effect since ``t=0`` and the new ``args``\n will be used in future call.\n \"\"\"\n return self._inv(self(t, **args))\n\n def _compute(self, t, idx):\n \"\"\"\n Compute the propagator at ``t``, ``idx`` point to a pair of\n (time, propagator) close to the desired time.\n \"\"\"\n t_last = self.solver._integrator.get_state(copy=False)[0]\n if self.times[idx-1] <= t_last <= t:\n U = self.solver.step(t)\n elif idx > 0:\n self.solver.start(self.props[idx-1], self.times[idx-1])\n U = self.solver.step(t)\n else:\n # Evolving backward in time is not supported by all integrator.\n self.solver.start(qeye_like(self.props[0]), t)\n Uinv = self.solver.step(self.times[idx])\n U = self._inv(Uinv)\n return U\n\n def _inv(self, U):\n 
return U.dag() if self.unitary else U.inv()\n\n def _insert(self, t, U, idx):\n \"\"\"\n Insert a new pair of (time, propagator) to the memorized states.\n \"\"\"\n while len(self.times) >= self.memoize:\n rm_idx = self.memoize // 2\n if self.times[rm_idx] < t:\n idx -= 1\n del self.times[rm_idx]\n del self.props[rm_idx]\n self.times.insert(idx, t)\n self.props.insert(idx, U)\n", "path": "qutip/solver/propagator.py"}], "after_files": [{"content": "__all__ = ['Propagator', 'propagator', 'propagator_steadystate']\n\nimport numbers\nimport numpy as np\n\nfrom .. import Qobj, qeye, qeye_like, unstack_columns, QobjEvo, liouvillian\nfrom ..core import data as _data\nfrom .mesolve import mesolve, MESolver\nfrom .sesolve import sesolve, SESolver\nfrom .heom.bofin_solvers import HEOMSolver\nfrom .solver_base import Solver\nfrom .multitraj import MultiTrajSolver\n\n\ndef propagator(H, t, c_ops=(), args=None, options=None, **kwargs):\n r\"\"\"\n Calculate the propagator U(t) for the density matrix or wave function such\n that :math:`\\psi(t) = U(t)\\psi(0)` or\n :math:`\\rho_{\\mathrm vec}(t) = U(t) \\rho_{\\mathrm vec}(0)`\n where :math:`\\rho_{\\mathrm vec}` is the vector representation of the\n density matrix.\n\n Parameters\n ----------\n H : :obj:`.Qobj`, :obj:`.QobjEvo`, :obj:`.QobjEvo` compatible format\n Possibly time-dependent system Liouvillian or Hamiltonian as a Qobj or\n QobjEvo. ``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`] or callable\n that can be made into :obj:`.QobjEvo` are also accepted.\n\n t : float or array-like\n Time or list of times for which to evaluate the propagator. If a single\n time ``t`` is passed, the propagator from ``0`` to ``t`` is computed.\n When ``t`` is a list, the propagators from the first time in the list\n to each elements in ``t`` is returned. In that case, the first output\n will always be the identity matrix.\n\n c_ops : list, optional\n List of Qobj or QobjEvo collapse operators.\n\n args : dictionary, optional\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : dict, optional\n Options for the solver.\n\n **kwargs :\n Extra parameters to use when creating the\n :obj:`.QobjEvo` from a list format ``H``.\n\n Returns\n -------\n U : :obj:`.Qobj`, list\n Instance representing the propagator(s) :math:`U(t)`. 
Return a single\n Qobj when ``t`` is a number or a list when ``t`` is a list.\n\n \"\"\"\n if isinstance(t, numbers.Real):\n tlist = [0, t]\n list_output = False\n else:\n tlist = t\n list_output = True\n\n if not isinstance(H, (Qobj, QobjEvo)):\n H = QobjEvo(H, args=args, **kwargs)\n\n if c_ops:\n H = liouvillian(H, c_ops)\n\n U0 = qeye_like(H)\n\n if H.issuper:\n out = mesolve(H, U0, tlist, args=args, options=options).states\n else:\n out = sesolve(H, U0, tlist, args=args, options=options).states\n\n if list_output:\n return out\n else:\n return out[-1]\n\n\ndef propagator_steadystate(U):\n r\"\"\"Find the steady state for successive applications of the propagator\n :math:`U`.\n\n Parameters\n ----------\n U : :obj:`.Qobj`\n Operator representing the propagator.\n\n Returns\n -------\n a : :obj:`.Qobj`\n Instance representing the steady-state density matrix.\n \"\"\"\n evals, estates = U.eigenstates()\n shifted_vals = np.abs(evals - 1.0)\n ev_idx = np.argmin(shifted_vals)\n rho_data = unstack_columns(estates[ev_idx].data)\n rho_data = _data.mul(rho_data, 0.5 / _data.trace(rho_data))\n return Qobj(_data.add(rho_data, _data.adjoint(rho_data)),\n dims=U.dims[0],\n isherm=True,\n copy=False)\n\n\nclass Propagator:\n \"\"\"\n A generator of propagator for a system.\n\n Usage:\n\n U = Propagator(H, c_ops)\n\n psi_t = U(t) @ psi_0\n\n Save some previously computed propagator are stored to speed up subsequent\n computation. Changing ``args`` will erase these stored probagator.\n\n Parameters\n ----------\n system : :obj:`.Qobj`, :obj:`.QobjEvo`, :class:`.Solver`\n Possibly time-dependent system driving the evolution, either already\n packaged in a solver, such as :class:`.SESolver` or :class:`.BRSolver`,\n or the Liouvillian or Hamiltonian as a :obj:`.Qobj`,\n :obj:`.QobjEvo`. ``list`` of [:obj:`.Qobj`, :obj:`.Coefficient`]\n or callable that can be made into :obj:`.QobjEvo` are also accepted.\n\n Solvers that run non-deterministacilly, such as :class:`.MCSolver`, are\n not supported.\n\n c_ops : list, optional\n List of :obj:`.Qobj` or :obj:`.QobjEvo` collapse operators.\n\n args : dictionary, optional\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : dict, optional\n Options for the solver.\n\n memoize : int, default: 10\n Max number of propagator to save.\n\n tol : float, default: 1e-14\n Absolute tolerance for the time. If a previous propagator was computed\n at a time within tolerance, that propagator will be returned.\n\n Notes\n -----\n The :class:`Propagator` is not a :obj:`.QobjEvo` so\n it cannot be used for operations with :obj:`.Qobj` or\n :obj:`.QobjEvo`. It can be made into a\n :obj:`.QobjEvo` with ::\n\n U = QobjEvo(Propagator(H))\n\n \"\"\"\n def __init__(self, system, *, c_ops=(), args=None, options=None,\n memoize=10, tol=1e-14):\n if isinstance(system, MultiTrajSolver):\n raise TypeError(\"Non-deterministic solvers cannot be used \"\n \"as a propagator system\")\n elif isinstance(system, HEOMSolver):\n raise NotImplementedError(\n \"HEOM is not supported by Propagator. 
\"\n \"Please, tell us on GitHub issues if you need it!\"\n )\n elif isinstance(system, Solver):\n self.solver = system\n else:\n Hevo = QobjEvo(system, args=args)\n c_ops = [QobjEvo(op, args=args) for op in c_ops]\n if Hevo.issuper or c_ops:\n self.solver = MESolver(Hevo, c_ops=c_ops, options=options)\n else:\n self.solver = SESolver(Hevo, options=options)\n\n self.times = [0]\n self.invs = [None]\n self.props = [qeye(self.solver.sys_dims)]\n self.solver.start(self.props[0], self.times[0])\n self.cte = self.solver.rhs.isconstant\n H_0 = self.solver.rhs(0)\n self.unitary = not H_0.issuper and H_0.isherm\n self.args = args\n self.memoize = max(3, int(memoize))\n self.tol = tol\n\n def _lookup_or_compute(self, t):\n \"\"\"\n Get U(t) from cache or compute it.\n \"\"\"\n idx = np.searchsorted(self.times, t)\n if idx < len(self.times) and abs(t-self.times[idx]) <= self.tol:\n U = self.props[idx]\n elif idx > 0 and abs(t-self.times[idx-1]) <= self.tol:\n U = self.props[idx-1]\n else:\n U = self._compute(t, idx)\n self._insert(t, U, idx)\n return U\n\n def __call__(self, t, t_start=0, **args):\n \"\"\"\n Get the propagator from ``t_start`` to ``t``.\n\n Parameters\n ----------\n t : float\n Time at which to compute the propagator.\n t_start: float [0]\n Time at which the propagator start such that:\n ``psi[t] = U.prop(t, t_start) @ psi[t_start]``\n args : dict\n Argument to pass to a time dependent Hamiltonian.\n Updating ``args`` take effect since ``t=0`` and the new ``args``\n will be used in future call.\n \"\"\"\n # We could improve it when the system is constant using U(2t) = U(t)**2\n if not self.cte and args and args != self.args:\n self.args = args\n self.solver._argument(args)\n self.times = [0]\n self.props = [qeye_like(self.props[0])]\n self.solver.start(self.props[0], self.times[0])\n\n if t_start:\n if t == t_start:\n U = self._lookup_or_compute(0)\n if self.cte:\n U = self._lookup_or_compute(t - t_start)\n else:\n Uinv = self._inv(self._lookup_or_compute(t_start))\n U = self._lookup_or_compute(t) @ Uinv\n else:\n U = self._lookup_or_compute(t)\n return U\n\n def inv(self, t, **args):\n \"\"\"\n Get the inverse of the propagator at ``t``, such that\n ``psi_0 = U.inv(t) @ psi_t``\n\n Parameters\n ----------\n t : float\n Time at which to compute the propagator.\n args : dict\n Argument to pass to a time dependent Hamiltonian.\n Updating ``args`` take effect since ``t=0`` and the new ``args``\n will be used in future call.\n \"\"\"\n return self._inv(self(t, **args))\n\n def _compute(self, t, idx):\n \"\"\"\n Compute the propagator at ``t``, ``idx`` point to a pair of\n (time, propagator) close to the desired time.\n \"\"\"\n t_last = self.solver._integrator.get_state(copy=False)[0]\n if self.times[idx-1] <= t_last <= t:\n U = self.solver.step(t)\n elif idx > 0:\n self.solver.start(self.props[idx-1], self.times[idx-1])\n U = self.solver.step(t)\n else:\n # Evolving backward in time is not supported by all integrator.\n self.solver.start(qeye_like(self.props[0]), t)\n Uinv = self.solver.step(self.times[idx])\n U = self._inv(Uinv)\n return U\n\n def _inv(self, U):\n return U.dag() if self.unitary else U.inv()\n\n def _insert(self, t, U, idx):\n \"\"\"\n Insert a new pair of (time, propagator) to the memorized states.\n \"\"\"\n while len(self.times) >= self.memoize:\n rm_idx = self.memoize // 2\n if self.times[rm_idx] < t:\n idx -= 1\n del self.times[rm_idx]\n del self.props[rm_idx]\n self.times.insert(idx, t)\n self.props.insert(idx, U)\n", "path": "qutip/solver/propagator.py"}]} | 
3,951 | 222 |
gh_patches_debug_16882 | rasdani/github-patches | git_diff | learningequality__kolibri-1535 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Learn's Search is broken
## Summary
* Search within Learn is currently broken on Develop.
* See [triggerSearch()](https://github.com/learningequality/kolibri/blob/develop/kolibri/plugins/learn/assets/src/state/actions.js#L377)
## Exception
`Exception Value: 'list' object has no attribute 'values_list'`
## How to reproduce
1. Search for something within Learn
## Real-life consequences
:rage4:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/content/serializers.py`
Content:
```
1 from django.db.models import Manager
2 from django.db.models.query import RawQuerySet
3 from kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File
4 from rest_framework import serializers
5
6 from .content_db_router import default_database_is_attached, get_active_content_database
7
8
9 class ChannelMetadataCacheSerializer(serializers.ModelSerializer):
10
11 class Meta:
12 model = ChannelMetadataCache
13 fields = ('root_pk', 'id', 'name', 'description', 'author')
14
15
16 class FileSerializer(serializers.ModelSerializer):
17 storage_url = serializers.SerializerMethodField()
18 preset = serializers.SerializerMethodField()
19 download_url = serializers.SerializerMethodField()
20
21 def get_storage_url(self, target_node):
22 return target_node.get_storage_url()
23
24 def get_preset(self, target_node):
25 return target_node.get_preset()
26
27 def get_download_url(self, target_node):
28 return target_node.get_download_url()
29
30 class Meta:
31 model = File
32 fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',
33 'supplementary', 'thumbnail', 'download_url')
34
35
36 class AssessmentMetaDataSerializer(serializers.ModelSerializer):
37
38 assessment_item_ids = serializers.JSONField(default='[]')
39 mastery_model = serializers.JSONField(default='{}')
40
41 class Meta:
42 model = AssessmentMetaData
43 fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )
44
45
46 def get_progress_fraction(content_id, user):
47 from kolibri.logger.models import ContentSummaryLog
48 try:
49 # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress
50 overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content_id).progress
51 except ContentSummaryLog.DoesNotExist:
52 return None
53 return round(overall_progress, 4)
54
55
56 def get_progress_fractions(nodes, user):
57 from kolibri.logger.models import ContentSummaryLog
58 if isinstance(nodes, RawQuerySet):
59 leaf_ids = [datum.content_id for datum in nodes]
60 else:
61 leaf_ids = nodes.values_list("content_id", flat=True)
62
63 # get all summary logs for the current user that correspond to the descendant content nodes
64 if default_database_is_attached(): # if possible, do a direct join between the content and default databases
65 channel_alias = get_active_content_database()
66 summary_logs = ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=leaf_ids)
67 else: # otherwise, convert the leaf queryset into a flat list of ids and use that
68 summary_logs = ContentSummaryLog.objects.filter(user=user, content_id__in=list(leaf_ids))
69
70 # make a lookup dict for all logs to allow mapping from content_id to current progress
71 overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
72 return overall_progress
73
74
75 class ContentNodeListSerializer(serializers.ListSerializer):
76
77 def to_representation(self, data):
78
79 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
80 progress_dict = {}
81 else:
82 user = self.context["request"].user
83 progress_dict = get_progress_fractions(data, user)
84
85 # Dealing with nested relationships, data can be a Manager,
86 # so, first get a queryset from the Manager if needed
87 iterable = data.all() if isinstance(data, Manager) else data
88
89 return [
90 self.child.to_representation(item, progress_dict.get(item.content_id)) for item in iterable
91 ]
92
93
94 class ContentNodeSerializer(serializers.ModelSerializer):
95 parent = serializers.PrimaryKeyRelatedField(read_only=True)
96 files = FileSerializer(many=True, read_only=True)
97 assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)
98 license = serializers.StringRelatedField(many=False)
99 license_description = serializers.SerializerMethodField()
100
101 def __init__(self, *args, **kwargs):
102 # Instantiate the superclass normally
103 super(ContentNodeSerializer, self).__init__(*args, **kwargs)
104
105 # enable dynamic fields specification!
106 if 'request' in self.context and self.context['request'].GET.get('fields', None):
107 fields = self.context['request'].GET['fields'].split(',')
108 # Drop any fields that are not specified in the `fields` argument.
109 allowed = set(fields)
110 existing = set(self.fields.keys())
111 for field_name in existing - allowed:
112 self.fields.pop(field_name)
113
114 def to_representation(self, instance, progress_fraction=None):
115 if progress_fraction is None:
116 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
117 progress_fraction = 0
118 else:
119 user = self.context["request"].user
120 progress_fraction = get_progress_fraction(instance.content_id, user)
121 value = super(ContentNodeSerializer, self).to_representation(instance)
122 value['progress_fraction'] = progress_fraction
123 return value
124
125 def get_license_description(self, target_node):
126 if target_node.license_id:
127 return target_node.license.license_description
128 return ''
129
130 class Meta:
131 model = ContentNode
132 fields = (
133 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',
134 'license', 'license_description', 'files', 'parent', 'author',
135 'assessmentmetadata',
136 )
137
138 list_serializer_class = ContentNodeListSerializer
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py
--- a/kolibri/content/serializers.py
+++ b/kolibri/content/serializers.py
@@ -55,7 +55,7 @@
def get_progress_fractions(nodes, user):
from kolibri.logger.models import ContentSummaryLog
- if isinstance(nodes, RawQuerySet):
+ if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):
leaf_ids = [datum.content_id for datum in nodes]
else:
leaf_ids = nodes.values_list("content_id", flat=True)
@@ -76,6 +76,9 @@
def to_representation(self, data):
+ if not data:
+ return data
+
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
progress_dict = {}
else:
| {"golden_diff": "diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py\n--- a/kolibri/content/serializers.py\n+++ b/kolibri/content/serializers.py\n@@ -55,7 +55,7 @@\n \n def get_progress_fractions(nodes, user):\n from kolibri.logger.models import ContentSummaryLog\n- if isinstance(nodes, RawQuerySet):\n+ if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.values_list(\"content_id\", flat=True)\n@@ -76,6 +76,9 @@\n \n def to_representation(self, data):\n \n+ if not data:\n+ return data\n+\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n", "issue": "Learn's Search is broken\n## Summary\r\n\r\n* Search within Learn is currently broken on Develop.\r\n* See [triggerSearch()](https://github.com/learningequality/kolibri/blob/develop/kolibri/plugins/learn/assets/src/state/actions.js#L377)\r\n\r\n## Exception\r\n\r\n`Exception Value: 'list' object has no attribute 'values_list'`\r\n\r\n## How to reproduce\r\n\r\n1. Search for something within Learn\r\n\r\n## Real-life consequences\r\n\r\n:rage4: \n", "before_files": [{"content": "from django.db.models import Manager\nfrom django.db.models.query import RawQuerySet\nfrom kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File\nfrom rest_framework import serializers\n\nfrom .content_db_router import default_database_is_attached, get_active_content_database\n\n\nclass ChannelMetadataCacheSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = ChannelMetadataCache\n fields = ('root_pk', 'id', 'name', 'description', 'author')\n\n\nclass FileSerializer(serializers.ModelSerializer):\n storage_url = serializers.SerializerMethodField()\n preset = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_preset(self, target_node):\n return target_node.get_preset()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n class Meta:\n model = File\n fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',\n 'supplementary', 'thumbnail', 'download_url')\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n assessment_item_ids = serializers.JSONField(default='[]')\n mastery_model = serializers.JSONField(default='{}')\n\n class Meta:\n model = AssessmentMetaData\n fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )\n\n\ndef get_progress_fraction(content_id, user):\n from kolibri.logger.models import ContentSummaryLog\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content_id).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef get_progress_fractions(nodes, user):\n from kolibri.logger.models import ContentSummaryLog\n if isinstance(nodes, RawQuerySet):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.values_list(\"content_id\", flat=True)\n\n # get all summary logs for the current user that correspond to the descendant content nodes\n if default_database_is_attached(): # if possible, do a direct join between the content and 
default databases\n channel_alias = get_active_content_database()\n summary_logs = ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=leaf_ids)\n else: # otherwise, convert the leaf queryset into a flat list of ids and use that\n summary_logs = ContentSummaryLog.objects.filter(user=user, content_id__in=list(leaf_ids))\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n progress_dict = get_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(item, progress_dict.get(item.content_id)) for item in iterable\n ]\n\n\nclass ContentNodeSerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)\n license = serializers.StringRelatedField(many=False)\n license_description = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(ContentNodeSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if 'request' in self.context and self.context['request'].GET.get('fields', None):\n fields = self.context['request'].GET['fields'].split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n def to_representation(self, instance, progress_fraction=None):\n if progress_fraction is None:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = get_progress_fraction(instance.content_id, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value['progress_fraction'] = progress_fraction\n return value\n\n def get_license_description(self, target_node):\n if target_node.license_id:\n return target_node.license.license_description\n return ''\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',\n 'license', 'license_description', 'files', 'parent', 'author',\n 'assessmentmetadata',\n )\n\n list_serializer_class = ContentNodeListSerializer\n", "path": "kolibri/content/serializers.py"}], "after_files": [{"content": "from django.db.models import Manager\nfrom django.db.models.query import RawQuerySet\nfrom kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File\nfrom rest_framework import serializers\n\nfrom .content_db_router import default_database_is_attached, get_active_content_database\n\n\nclass ChannelMetadataCacheSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = ChannelMetadataCache\n fields = 
('root_pk', 'id', 'name', 'description', 'author')\n\n\nclass FileSerializer(serializers.ModelSerializer):\n storage_url = serializers.SerializerMethodField()\n preset = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_preset(self, target_node):\n return target_node.get_preset()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n class Meta:\n model = File\n fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',\n 'supplementary', 'thumbnail', 'download_url')\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n assessment_item_ids = serializers.JSONField(default='[]')\n mastery_model = serializers.JSONField(default='{}')\n\n class Meta:\n model = AssessmentMetaData\n fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )\n\n\ndef get_progress_fraction(content_id, user):\n from kolibri.logger.models import ContentSummaryLog\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content_id).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef get_progress_fractions(nodes, user):\n from kolibri.logger.models import ContentSummaryLog\n if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.values_list(\"content_id\", flat=True)\n\n # get all summary logs for the current user that correspond to the descendant content nodes\n if default_database_is_attached(): # if possible, do a direct join between the content and default databases\n channel_alias = get_active_content_database()\n summary_logs = ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=leaf_ids)\n else: # otherwise, convert the leaf queryset into a flat list of ids and use that\n summary_logs = ContentSummaryLog.objects.filter(user=user, content_id__in=list(leaf_ids))\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n if not data:\n return data\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n progress_dict = get_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(item, progress_dict.get(item.content_id)) for item in iterable\n ]\n\n\nclass ContentNodeSerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)\n license = serializers.StringRelatedField(many=False)\n license_description = serializers.SerializerMethodField()\n\n def 
__init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(ContentNodeSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if 'request' in self.context and self.context['request'].GET.get('fields', None):\n fields = self.context['request'].GET['fields'].split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n def to_representation(self, instance, progress_fraction=None):\n if progress_fraction is None:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = get_progress_fraction(instance.content_id, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value['progress_fraction'] = progress_fraction\n return value\n\n def get_license_description(self, target_node):\n if target_node.license_id:\n return target_node.license.license_description\n return ''\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',\n 'license', 'license_description', 'files', 'parent', 'author',\n 'assessmentmetadata',\n )\n\n list_serializer_class = ContentNodeListSerializer\n", "path": "kolibri/content/serializers.py"}]} | 1,868 | 198 |
gh_patches_debug_20441 | rasdani/github-patches | git_diff | acl-org__acl-anthology-675 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`similar` field in name_variants.yaml should be symmetric and transitive
This comes up in #615.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/anthology/index.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Marcel Bollmann <[email protected]>
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import logging as log
18 import re
19 import yaml
20 from collections import defaultdict, Counter
21 from slugify import slugify
22 from stop_words import get_stop_words
23 from .formatter import bibtex_encode
24 from .people import PersonName
25 from .venues import VenueIndex
26
27 from typing import List
28
29 try:
30 from yaml import CLoader as Loader
31 except ImportError:
32 from yaml import Loader
33
34
35 BIBKEY_MAX_NAMES = 2
36
37
38 def load_stopwords(language):
39 return [t for w in get_stop_words(language) for t in slugify(w).split("-")]
40
41
42 class AnthologyIndex:
43 """Keeps an index of persons, their associated papers, paper bibliography
44 keys, etc.."""
45
46 def __init__(self, parent, srcdir=None):
47 self._parent = parent
48 self.bibkeys = set()
49 self.stopwords = load_stopwords("en")
50 self.id_to_canonical = {} # maps ids to canonical names
51 self.id_to_used = defaultdict(set) # maps ids to all names actually used
52 self.name_to_ids = defaultdict(list) # maps canonical/variant names to ids
53 self.coauthors = defaultdict(Counter) # maps ids to co-author ids
54 self.comments = {} # maps ids to comments (used for distinguishing authors with same name)
55 self.similar = defaultdict(set)
56 self.id_to_papers = defaultdict(lambda: defaultdict(list)) # id -> role -> papers
57 self.name_to_papers = defaultdict(lambda: defaultdict(list)) # name -> (explicit id?) -> papers; used only for error checking
58 if srcdir is not None:
59 self.load_variant_list(srcdir)
60
61 def load_variant_list(self, directory):
62 with open("{}/yaml/name_variants.yaml".format(directory), "r") as f:
63 name_list = yaml.load(f, Loader=Loader)
64
65 # Reserve ids for people with explicit ids in variant list
66 for entry in name_list:
67 if "id" in entry:
68 id_ = entry["id"]
69 canonical = entry["canonical"]
70 canonical = PersonName.from_dict(canonical)
71 self.set_canonical_name(id_, canonical)
72 # Automatically add people with same canonical name to similar list
73 for name, ids in self.name_to_ids.items():
74 if len(ids) > 1:
75 for id1 in ids:
76 for id2 in ids:
77 if id2 != id1:
78 self.similar[id1].add(id2)
79 for entry in name_list:
80 try:
81 canonical = entry["canonical"]
82 variants = entry.get("variants", [])
83 id_ = entry.get("id", None)
84 except (KeyError, TypeError):
85 log.error("Couldn't parse name variant entry: {}".format(entry))
86 continue
87 canonical = PersonName.from_dict(canonical)
88 if id_ is None:
89 if canonical in self.name_to_ids:
90 log.error(
91 "Canonical name '{}' is ambiguous but doesn't have an id; please add one". format(canonical))
92 id_ = self.fresh_id(canonical)
93 self.set_canonical_name(id_, canonical)
94 for variant in variants:
95 variant = PersonName.from_dict(variant)
96 if variant in self.name_to_ids:
97 log.error(
98 "Tried to add '{}' as variant of '{}', but is already a variant of '{}'".format(
99 repr(variant),
100 repr(canonical),
101 repr(self.id_to_canonical[
102 self.name_to_ids[variant][0]
103 ]),
104 )
105 )
106 continue
107 self.add_variant_name(id_, variant)
108 if "comment" in entry:
109 self.comments[id_] = entry["comment"]
110 if "similar" in entry:
111 self.similar[id_].update(entry["similar"])
112
113 def _is_stopword(self, word, paper):
114 """Determines if a given word should be considered a stopword for
115 the purpose of generating BibTeX keys."""
116 if word in self.stopwords:
117 return True
118 if paper.is_volume:
119 # Some simple heuristics to exclude probably uninformative words
120 # -- these are not perfect
121 if word in (
122 "proceedings",
123 "volume",
124 "conference",
125 "workshop",
126 "annual",
127 "meeting",
128 "computational",
129 ):
130 return True
131 elif (
132 re.match(r"[0-9]+(st|nd|rd|th)", word)
133 or word.endswith("ieth")
134 or word.endswith("enth")
135 or word
136 in (
137 "first",
138 "second",
139 "third",
140 "fourth",
141 "fifth",
142 "sixth",
143 "eighth",
144 "ninth",
145 "twelfth",
146 )
147 ):
148 return True
149 return False
150
151 def create_bibkey(self, paper):
152 """Create a unique bibliography key for the given paper."""
153 if paper.is_volume:
154 # Proceedings volumes use venue acronym instead of authors/editors
155 bibnames = slugify(self._parent.venues.get_by_letter(paper.full_id[0]))
156 else:
157 # Regular papers use author/editor names
158 names = paper.get("author")
159 if not names:
160 names = paper.get("editor", [])
161 if names:
162 if len(names) > BIBKEY_MAX_NAMES:
163 bibnames = "{}-etal".format(slugify(names[0][0].last))
164 else:
165 bibnames = "-".join(slugify(n.last) for n, _ in names)
166 else:
167 bibnames = "nn"
168 title = [
169 w
170 for w in slugify(paper.get_title("plain")).split("-")
171 if not self._is_stopword(w, paper)
172 ]
173 bibkey = "{}-{}-{}".format(bibnames, str(paper.get("year")), title.pop(0))
174 while bibkey in self.bibkeys: # guarantee uniqueness
175 if title:
176 bibkey += "-{}".format(title.pop(0))
177 else:
178 match = re.search(r"-([0-9][0-9]?)$", bibkey)
179 if match is not None:
180 num = int(match.group(1)) + 1
181 bibkey = bibkey[: -len(match.group(1))] + "{}".format(num)
182 else:
183 bibkey += "-2"
184 log.debug(
185 "New bibkey for clash that can't be resolved by adding title words: {}".format(
186 bibkey
187 )
188 )
189 self.bibkeys.add(bibkey)
190 return bibkey
191
192 def register(self, paper):
193 """Register all names associated with the given paper."""
194 from .papers import Paper
195
196 assert isinstance(paper, Paper), "Expected Paper, got {} ({})".format(
197 type(paper), repr(paper)
198 )
199 paper.bibkey = self.create_bibkey(paper)
200 for role in ("author", "editor"):
201 for name, id_ in paper.get(role, []):
202 if id_ is None:
203 if len(self.name_to_ids.get(name, [])) > 1:
204 log.error("Paper {} uses ambiguous name '{}' without id".format(paper.full_id, name))
205 log.error(" Please add an id, for example: {}".format(" ".join(self.name_to_ids[name])))
206 id_ = self.resolve_name(name)["id"]
207 explicit = False
208 else:
209 if id_ not in self.id_to_canonical:
210 log.error("Paper {} uses name '{}' with id '{}' that does not exist".format(paper.full_id, name, id_))
211 explicit = True
212
213 self.id_to_used[id_].add(name)
214 # Register paper
215 self.id_to_papers[id_][role].append(paper.full_id)
216 self.name_to_papers[name][explicit].append(paper.full_id)
217 # Register co-author(s)
218 for co_name, co_id in paper.get(role):
219 if co_id is None:
220 co_id = self.resolve_name(co_name)["id"]
221 if co_id != id_:
222 self.coauthors[id_][co_id] += 1
223
224 def verify(self):
225 for name, ids in self.name_to_ids.items():
226 for id_ in ids:
227 cname = self.id_to_canonical[id_]
228 if name != cname and name not in self.id_to_used[id_]:
229 log.warning(
230 "Variant name '{}' of '{}' is not used".format(
231 repr(name),
232 repr(cname)))
233 for name, d in self.name_to_papers.items():
234 if len(d[False]) > 0 and len(d[True]) > 0:
235 log.error("Name '{}' is used both with and without explicit id".format(repr(name)))
236 log.error(
237 " Please add an id to paper(s): {}".format(
238 " ".join(d[False])
239 )
240 )
241 log.error(
242 " Or remove the id from paper(s): {}".format(
243 " ".join(d[True])
244 )
245 )
246
247 def personids(self):
248 return self.id_to_canonical.keys()
249
250 def get_canonical_name(self, id_):
251 return self.id_to_canonical[id_]
252
253 def set_canonical_name(self, id_, name):
254 if id_ in self.id_to_canonical:
255 log.error("Person id '{}' is used by both '{}' and '{}'".format(id_, name, self.id_to_canonical[id_]))
256 self.id_to_canonical[id_] = name
257 self.name_to_ids[name].append(id_)
258
259 def add_variant_name(self, id_, name):
260 self.name_to_ids[name].append(id_)
261
262 def get_used_names(self, id_):
263 """Return a list of all names used for a given person."""
264 return self.id_to_used[id_]
265
266 def get_ids(self, name: PersonName) -> List[str]:
267 """
268 Returns a list of distinct IDs (people) associated with a surface form.
269
270 :param name: The name (surface form) of the person being searched (id field ignored).
271 :return: A list of name ID strings.
272 """
273 if name not in self.name_to_ids:
274 id_ = self.fresh_id(name)
275 self.set_canonical_name(id_, name)
276
277 return sorted(self.name_to_ids[name])
278
279 def resolve_name(self, name, id_=None):
280 """Find person named 'name' and return a dict with fields
281 'first', 'last', 'id'"""
282 if id_ is None:
283 ids = self.get_ids(name)
284 assert len(ids) > 0
285 if len(ids) > 1:
286 log.debug("Name '{}' is ambiguous between {}".format(
287 repr(name),
288 ', '.join("'{}'".format(i) for i in ids)
289 ))
290 # Just return the first
291 id_ = ids[0]
292 d = name.as_dict()
293 d["id"] = id_
294 return d
295
296 def fresh_id(self, name):
297 assert name not in self.name_to_ids, name
298 slug, i = slugify(repr(name)), 0
299 while slug == "" or slug in self.id_to_canonical:
300 i += 1
301 slug = "{}{}".format(slugify(repr(name)), i)
302 return slug
303
304 def get_papers(self, id_, role=None):
305 if role is None:
306 return [p for p_list in self.id_to_papers[id_].values() for p in p_list]
307 return self.id_to_papers[id_][role]
308
309 def get_coauthors(self, id_):
310 return self.coauthors[id_].items()
311
312 def get_venues(self, vidx: VenueIndex, id_):
313 """Get a list of venues a person has published in, with counts."""
314 venues = Counter()
315 for paper in self.get_papers(id_):
316 for venue in vidx.get_associated_venues(paper):
317 venues[venue] += 1
318 return venues
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/anthology/index.py b/bin/anthology/index.py
--- a/bin/anthology/index.py
+++ b/bin/anthology/index.py
@@ -109,6 +109,22 @@
self.comments[id_] = entry["comment"]
if "similar" in entry:
self.similar[id_].update(entry["similar"])
+ for other in entry["similar"]:
+ if id_ not in self.similar[other]:
+ log.debug('inferring similar name {} -> {}'.format(other, id_))
+ self.similar[other].add(id_)
+
+ # form transitive closure of self.similar
+ again = True
+ while again:
+ again = False
+ for x in list(self.similar):
+ for y in list(self.similar[x]):
+ for z in list(self.similar[y]):
+ if z != x and z not in self.similar[x]:
+ self.similar[x].add(z)
+ log.debug('inferring similar name {} -> {}'.format(x, z))
+ again = True
def _is_stopword(self, word, paper):
"""Determines if a given word should be considered a stopword for
| {"golden_diff": "diff --git a/bin/anthology/index.py b/bin/anthology/index.py\n--- a/bin/anthology/index.py\n+++ b/bin/anthology/index.py\n@@ -109,6 +109,22 @@\n self.comments[id_] = entry[\"comment\"]\n if \"similar\" in entry:\n self.similar[id_].update(entry[\"similar\"])\n+ for other in entry[\"similar\"]:\n+ if id_ not in self.similar[other]:\n+ log.debug('inferring similar name {} -> {}'.format(other, id_))\n+ self.similar[other].add(id_)\n+ \n+ # form transitive closure of self.similar\n+ again = True\n+ while again:\n+ again = False\n+ for x in list(self.similar):\n+ for y in list(self.similar[x]):\n+ for z in list(self.similar[y]):\n+ if z != x and z not in self.similar[x]:\n+ self.similar[x].add(z)\n+ log.debug('inferring similar name {} -> {}'.format(x, z))\n+ again = True\n \n def _is_stopword(self, word, paper):\n \"\"\"Determines if a given word should be considered a stopword for\n", "issue": "`similar` field in name_variants.yaml should be symmetric and transitive\nThis comes up in #615.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nimport re\nimport yaml\nfrom collections import defaultdict, Counter\nfrom slugify import slugify\nfrom stop_words import get_stop_words\nfrom .formatter import bibtex_encode\nfrom .people import PersonName\nfrom .venues import VenueIndex\n\nfrom typing import List\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\n\nBIBKEY_MAX_NAMES = 2\n\n\ndef load_stopwords(language):\n return [t for w in get_stop_words(language) for t in slugify(w).split(\"-\")]\n\n\nclass AnthologyIndex:\n \"\"\"Keeps an index of persons, their associated papers, paper bibliography\n keys, etc..\"\"\"\n\n def __init__(self, parent, srcdir=None):\n self._parent = parent\n self.bibkeys = set()\n self.stopwords = load_stopwords(\"en\")\n self.id_to_canonical = {} # maps ids to canonical names\n self.id_to_used = defaultdict(set) # maps ids to all names actually used\n self.name_to_ids = defaultdict(list) # maps canonical/variant names to ids\n self.coauthors = defaultdict(Counter) # maps ids to co-author ids\n self.comments = {} # maps ids to comments (used for distinguishing authors with same name)\n self.similar = defaultdict(set)\n self.id_to_papers = defaultdict(lambda: defaultdict(list)) # id -> role -> papers\n self.name_to_papers = defaultdict(lambda: defaultdict(list)) # name -> (explicit id?) 
-> papers; used only for error checking\n if srcdir is not None:\n self.load_variant_list(srcdir)\n\n def load_variant_list(self, directory):\n with open(\"{}/yaml/name_variants.yaml\".format(directory), \"r\") as f:\n name_list = yaml.load(f, Loader=Loader)\n\n # Reserve ids for people with explicit ids in variant list\n for entry in name_list:\n if \"id\" in entry:\n id_ = entry[\"id\"]\n canonical = entry[\"canonical\"]\n canonical = PersonName.from_dict(canonical)\n self.set_canonical_name(id_, canonical)\n # Automatically add people with same canonical name to similar list\n for name, ids in self.name_to_ids.items():\n if len(ids) > 1:\n for id1 in ids:\n for id2 in ids:\n if id2 != id1:\n self.similar[id1].add(id2)\n for entry in name_list:\n try:\n canonical = entry[\"canonical\"]\n variants = entry.get(\"variants\", [])\n id_ = entry.get(\"id\", None)\n except (KeyError, TypeError):\n log.error(\"Couldn't parse name variant entry: {}\".format(entry))\n continue\n canonical = PersonName.from_dict(canonical)\n if id_ is None:\n if canonical in self.name_to_ids:\n log.error(\n \"Canonical name '{}' is ambiguous but doesn't have an id; please add one\". format(canonical))\n id_ = self.fresh_id(canonical)\n self.set_canonical_name(id_, canonical)\n for variant in variants:\n variant = PersonName.from_dict(variant)\n if variant in self.name_to_ids:\n log.error(\n \"Tried to add '{}' as variant of '{}', but is already a variant of '{}'\".format(\n repr(variant),\n repr(canonical),\n repr(self.id_to_canonical[\n self.name_to_ids[variant][0]\n ]),\n )\n )\n continue\n self.add_variant_name(id_, variant)\n if \"comment\" in entry:\n self.comments[id_] = entry[\"comment\"]\n if \"similar\" in entry:\n self.similar[id_].update(entry[\"similar\"])\n\n def _is_stopword(self, word, paper):\n \"\"\"Determines if a given word should be considered a stopword for\n the purpose of generating BibTeX keys.\"\"\"\n if word in self.stopwords:\n return True\n if paper.is_volume:\n # Some simple heuristics to exclude probably uninformative words\n # -- these are not perfect\n if word in (\n \"proceedings\",\n \"volume\",\n \"conference\",\n \"workshop\",\n \"annual\",\n \"meeting\",\n \"computational\",\n ):\n return True\n elif (\n re.match(r\"[0-9]+(st|nd|rd|th)\", word)\n or word.endswith(\"ieth\")\n or word.endswith(\"enth\")\n or word\n in (\n \"first\",\n \"second\",\n \"third\",\n \"fourth\",\n \"fifth\",\n \"sixth\",\n \"eighth\",\n \"ninth\",\n \"twelfth\",\n )\n ):\n return True\n return False\n\n def create_bibkey(self, paper):\n \"\"\"Create a unique bibliography key for the given paper.\"\"\"\n if paper.is_volume:\n # Proceedings volumes use venue acronym instead of authors/editors\n bibnames = slugify(self._parent.venues.get_by_letter(paper.full_id[0]))\n else:\n # Regular papers use author/editor names\n names = paper.get(\"author\")\n if not names:\n names = paper.get(\"editor\", [])\n if names:\n if len(names) > BIBKEY_MAX_NAMES:\n bibnames = \"{}-etal\".format(slugify(names[0][0].last))\n else:\n bibnames = \"-\".join(slugify(n.last) for n, _ in names)\n else:\n bibnames = \"nn\"\n title = [\n w\n for w in slugify(paper.get_title(\"plain\")).split(\"-\")\n if not self._is_stopword(w, paper)\n ]\n bibkey = \"{}-{}-{}\".format(bibnames, str(paper.get(\"year\")), title.pop(0))\n while bibkey in self.bibkeys: # guarantee uniqueness\n if title:\n bibkey += \"-{}\".format(title.pop(0))\n else:\n match = re.search(r\"-([0-9][0-9]?)$\", bibkey)\n if match is not None:\n num = int(match.group(1)) + 
1\n bibkey = bibkey[: -len(match.group(1))] + \"{}\".format(num)\n else:\n bibkey += \"-2\"\n log.debug(\n \"New bibkey for clash that can't be resolved by adding title words: {}\".format(\n bibkey\n )\n )\n self.bibkeys.add(bibkey)\n return bibkey\n\n def register(self, paper):\n \"\"\"Register all names associated with the given paper.\"\"\"\n from .papers import Paper\n\n assert isinstance(paper, Paper), \"Expected Paper, got {} ({})\".format(\n type(paper), repr(paper)\n )\n paper.bibkey = self.create_bibkey(paper)\n for role in (\"author\", \"editor\"):\n for name, id_ in paper.get(role, []):\n if id_ is None:\n if len(self.name_to_ids.get(name, [])) > 1:\n log.error(\"Paper {} uses ambiguous name '{}' without id\".format(paper.full_id, name))\n log.error(\" Please add an id, for example: {}\".format(\" \".join(self.name_to_ids[name])))\n id_ = self.resolve_name(name)[\"id\"]\n explicit = False\n else:\n if id_ not in self.id_to_canonical:\n log.error(\"Paper {} uses name '{}' with id '{}' that does not exist\".format(paper.full_id, name, id_))\n explicit = True\n \n self.id_to_used[id_].add(name)\n # Register paper\n self.id_to_papers[id_][role].append(paper.full_id)\n self.name_to_papers[name][explicit].append(paper.full_id)\n # Register co-author(s)\n for co_name, co_id in paper.get(role):\n if co_id is None:\n co_id = self.resolve_name(co_name)[\"id\"]\n if co_id != id_:\n self.coauthors[id_][co_id] += 1\n\n def verify(self):\n for name, ids in self.name_to_ids.items():\n for id_ in ids:\n cname = self.id_to_canonical[id_]\n if name != cname and name not in self.id_to_used[id_]:\n log.warning(\n \"Variant name '{}' of '{}' is not used\".format(\n repr(name),\n repr(cname)))\n for name, d in self.name_to_papers.items():\n if len(d[False]) > 0 and len(d[True]) > 0:\n log.error(\"Name '{}' is used both with and without explicit id\".format(repr(name)))\n log.error(\n \" Please add an id to paper(s): {}\".format(\n \" \".join(d[False])\n )\n )\n log.error(\n \" Or remove the id from paper(s): {}\".format(\n \" \".join(d[True])\n )\n )\n \n def personids(self):\n return self.id_to_canonical.keys()\n\n def get_canonical_name(self, id_):\n return self.id_to_canonical[id_]\n\n def set_canonical_name(self, id_, name):\n if id_ in self.id_to_canonical:\n log.error(\"Person id '{}' is used by both '{}' and '{}'\".format(id_, name, self.id_to_canonical[id_]))\n self.id_to_canonical[id_] = name\n self.name_to_ids[name].append(id_)\n\n def add_variant_name(self, id_, name):\n self.name_to_ids[name].append(id_)\n\n def get_used_names(self, id_):\n \"\"\"Return a list of all names used for a given person.\"\"\"\n return self.id_to_used[id_]\n\n def get_ids(self, name: PersonName) -> List[str]:\n \"\"\"\n Returns a list of distinct IDs (people) associated with a surface form.\n\n :param name: The name (surface form) of the person being searched (id field ignored).\n :return: A list of name ID strings.\n \"\"\"\n if name not in self.name_to_ids:\n id_ = self.fresh_id(name)\n self.set_canonical_name(id_, name)\n\n return sorted(self.name_to_ids[name])\n\n def resolve_name(self, name, id_=None):\n \"\"\"Find person named 'name' and return a dict with fields\n 'first', 'last', 'id'\"\"\"\n if id_ is None:\n ids = self.get_ids(name)\n assert len(ids) > 0\n if len(ids) > 1:\n log.debug(\"Name '{}' is ambiguous between {}\".format(\n repr(name),\n ', '.join(\"'{}'\".format(i) for i in ids)\n ))\n # Just return the first\n id_ = ids[0]\n d = name.as_dict()\n d[\"id\"] = id_\n return d\n\n def 
fresh_id(self, name):\n assert name not in self.name_to_ids, name\n slug, i = slugify(repr(name)), 0\n while slug == \"\" or slug in self.id_to_canonical:\n i += 1\n slug = \"{}{}\".format(slugify(repr(name)), i)\n return slug\n\n def get_papers(self, id_, role=None):\n if role is None:\n return [p for p_list in self.id_to_papers[id_].values() for p in p_list]\n return self.id_to_papers[id_][role]\n\n def get_coauthors(self, id_):\n return self.coauthors[id_].items()\n\n def get_venues(self, vidx: VenueIndex, id_):\n \"\"\"Get a list of venues a person has published in, with counts.\"\"\"\n venues = Counter()\n for paper in self.get_papers(id_):\n for venue in vidx.get_associated_venues(paper):\n venues[venue] += 1\n return venues\n", "path": "bin/anthology/index.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nimport re\nimport yaml\nfrom collections import defaultdict, Counter\nfrom slugify import slugify\nfrom stop_words import get_stop_words\nfrom .formatter import bibtex_encode\nfrom .people import PersonName\nfrom .venues import VenueIndex\n\nfrom typing import List\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\n\nBIBKEY_MAX_NAMES = 2\n\n\ndef load_stopwords(language):\n return [t for w in get_stop_words(language) for t in slugify(w).split(\"-\")]\n\n\nclass AnthologyIndex:\n \"\"\"Keeps an index of persons, their associated papers, paper bibliography\n keys, etc..\"\"\"\n\n def __init__(self, parent, srcdir=None):\n self._parent = parent\n self.bibkeys = set()\n self.stopwords = load_stopwords(\"en\")\n self.id_to_canonical = {} # maps ids to canonical names\n self.id_to_used = defaultdict(set) # maps ids to all names actually used\n self.name_to_ids = defaultdict(list) # maps canonical/variant names to ids\n self.coauthors = defaultdict(Counter) # maps ids to co-author ids\n self.comments = {} # maps ids to comments (used for distinguishing authors with same name)\n self.similar = defaultdict(set)\n self.id_to_papers = defaultdict(lambda: defaultdict(list)) # id -> role -> papers\n self.name_to_papers = defaultdict(lambda: defaultdict(list)) # name -> (explicit id?) 
-> papers; used only for error checking\n if srcdir is not None:\n self.load_variant_list(srcdir)\n\n def load_variant_list(self, directory):\n with open(\"{}/yaml/name_variants.yaml\".format(directory), \"r\") as f:\n name_list = yaml.load(f, Loader=Loader)\n\n # Reserve ids for people with explicit ids in variant list\n for entry in name_list:\n if \"id\" in entry:\n id_ = entry[\"id\"]\n canonical = entry[\"canonical\"]\n canonical = PersonName.from_dict(canonical)\n self.set_canonical_name(id_, canonical)\n # Automatically add people with same canonical name to similar list\n for name, ids in self.name_to_ids.items():\n if len(ids) > 1:\n for id1 in ids:\n for id2 in ids:\n if id2 != id1:\n self.similar[id1].add(id2)\n for entry in name_list:\n try:\n canonical = entry[\"canonical\"]\n variants = entry.get(\"variants\", [])\n id_ = entry.get(\"id\", None)\n except (KeyError, TypeError):\n log.error(\"Couldn't parse name variant entry: {}\".format(entry))\n continue\n canonical = PersonName.from_dict(canonical)\n if id_ is None:\n if canonical in self.name_to_ids:\n log.error(\n \"Canonical name '{}' is ambiguous but doesn't have an id; please add one\". format(canonical))\n id_ = self.fresh_id(canonical)\n self.set_canonical_name(id_, canonical)\n for variant in variants:\n variant = PersonName.from_dict(variant)\n if variant in self.name_to_ids:\n log.error(\n \"Tried to add '{}' as variant of '{}', but is already a variant of '{}'\".format(\n repr(variant),\n repr(canonical),\n repr(self.id_to_canonical[\n self.name_to_ids[variant][0]\n ]),\n )\n )\n continue\n self.add_variant_name(id_, variant)\n if \"comment\" in entry:\n self.comments[id_] = entry[\"comment\"]\n if \"similar\" in entry:\n self.similar[id_].update(entry[\"similar\"])\n for other in entry[\"similar\"]:\n if id_ not in self.similar[other]:\n log.debug('inferring similar name {} -> {}'.format(other, id_))\n self.similar[other].add(id_)\n \n # form transitive closure of self.similar\n again = True\n while again:\n again = False\n for x in list(self.similar):\n for y in list(self.similar[x]):\n for z in list(self.similar[y]):\n if z != x and z not in self.similar[x]:\n self.similar[x].add(z)\n log.debug('inferring similar name {} -> {}'.format(x, z))\n again = True\n\n def _is_stopword(self, word, paper):\n \"\"\"Determines if a given word should be considered a stopword for\n the purpose of generating BibTeX keys.\"\"\"\n if word in self.stopwords:\n return True\n if paper.is_volume:\n # Some simple heuristics to exclude probably uninformative words\n # -- these are not perfect\n if word in (\n \"proceedings\",\n \"volume\",\n \"conference\",\n \"workshop\",\n \"annual\",\n \"meeting\",\n \"computational\",\n ):\n return True\n elif (\n re.match(r\"[0-9]+(st|nd|rd|th)\", word)\n or word.endswith(\"ieth\")\n or word.endswith(\"enth\")\n or word\n in (\n \"first\",\n \"second\",\n \"third\",\n \"fourth\",\n \"fifth\",\n \"sixth\",\n \"eighth\",\n \"ninth\",\n \"twelfth\",\n )\n ):\n return True\n return False\n\n def create_bibkey(self, paper):\n \"\"\"Create a unique bibliography key for the given paper.\"\"\"\n if paper.is_volume:\n # Proceedings volumes use venue acronym instead of authors/editors\n bibnames = slugify(self._parent.venues.get_by_letter(paper.full_id[0]))\n else:\n # Regular papers use author/editor names\n names = paper.get(\"author\")\n if not names:\n names = paper.get(\"editor\", [])\n if names:\n if len(names) > BIBKEY_MAX_NAMES:\n bibnames = \"{}-etal\".format(slugify(names[0][0].last))\n else:\n 
bibnames = \"-\".join(slugify(n.last) for n, _ in names)\n else:\n bibnames = \"nn\"\n title = [\n w\n for w in slugify(paper.get_title(\"plain\")).split(\"-\")\n if not self._is_stopword(w, paper)\n ]\n bibkey = \"{}-{}-{}\".format(bibnames, str(paper.get(\"year\")), title.pop(0))\n while bibkey in self.bibkeys: # guarantee uniqueness\n if title:\n bibkey += \"-{}\".format(title.pop(0))\n else:\n match = re.search(r\"-([0-9][0-9]?)$\", bibkey)\n if match is not None:\n num = int(match.group(1)) + 1\n bibkey = bibkey[: -len(match.group(1))] + \"{}\".format(num)\n else:\n bibkey += \"-2\"\n log.debug(\n \"New bibkey for clash that can't be resolved by adding title words: {}\".format(\n bibkey\n )\n )\n self.bibkeys.add(bibkey)\n return bibkey\n\n def register(self, paper):\n \"\"\"Register all names associated with the given paper.\"\"\"\n from .papers import Paper\n\n assert isinstance(paper, Paper), \"Expected Paper, got {} ({})\".format(\n type(paper), repr(paper)\n )\n paper.bibkey = self.create_bibkey(paper)\n for role in (\"author\", \"editor\"):\n for name, id_ in paper.get(role, []):\n if id_ is None:\n if len(self.name_to_ids.get(name, [])) > 1:\n log.error(\"Paper {} uses ambiguous name '{}' without id\".format(paper.full_id, name))\n log.error(\" Please add an id, for example: {}\".format(\" \".join(self.name_to_ids[name])))\n id_ = self.resolve_name(name)[\"id\"]\n explicit = False\n else:\n if id_ not in self.id_to_canonical:\n log.error(\"Paper {} uses name '{}' with id '{}' that does not exist\".format(paper.full_id, name, id_))\n explicit = True\n \n self.id_to_used[id_].add(name)\n # Register paper\n self.id_to_papers[id_][role].append(paper.full_id)\n self.name_to_papers[name][explicit].append(paper.full_id)\n # Register co-author(s)\n for co_name, co_id in paper.get(role):\n if co_id is None:\n co_id = self.resolve_name(co_name)[\"id\"]\n if co_id != id_:\n self.coauthors[id_][co_id] += 1\n\n def verify(self):\n for name, ids in self.name_to_ids.items():\n for id_ in ids:\n cname = self.id_to_canonical[id_]\n if name != cname and name not in self.id_to_used[id_]:\n log.warning(\n \"Variant name '{}' of '{}' is not used\".format(\n repr(name),\n repr(cname)))\n for name, d in self.name_to_papers.items():\n if len(d[False]) > 0 and len(d[True]) > 0:\n log.error(\"Name '{}' is used both with and without explicit id\".format(repr(name)))\n log.error(\n \" Please add an id to paper(s): {}\".format(\n \" \".join(d[False])\n )\n )\n log.error(\n \" Or remove the id from paper(s): {}\".format(\n \" \".join(d[True])\n )\n )\n \n def personids(self):\n return self.id_to_canonical.keys()\n\n def get_canonical_name(self, id_):\n return self.id_to_canonical[id_]\n\n def set_canonical_name(self, id_, name):\n if id_ in self.id_to_canonical:\n log.error(\"Person id '{}' is used by both '{}' and '{}'\".format(id_, name, self.id_to_canonical[id_]))\n self.id_to_canonical[id_] = name\n self.name_to_ids[name].append(id_)\n\n def add_variant_name(self, id_, name):\n self.name_to_ids[name].append(id_)\n\n def get_used_names(self, id_):\n \"\"\"Return a list of all names used for a given person.\"\"\"\n return self.id_to_used[id_]\n\n def get_ids(self, name: PersonName) -> List[str]:\n \"\"\"\n Returns a list of distinct IDs (people) associated with a surface form.\n\n :param name: The name (surface form) of the person being searched (id field ignored).\n :return: A list of name ID strings.\n \"\"\"\n if name not in self.name_to_ids:\n id_ = self.fresh_id(name)\n self.set_canonical_name(id_, 
name)\n\n return sorted(self.name_to_ids[name])\n\n def resolve_name(self, name, id_=None):\n \"\"\"Find person named 'name' and return a dict with fields\n 'first', 'last', 'id'\"\"\"\n if id_ is None:\n ids = self.get_ids(name)\n assert len(ids) > 0\n if len(ids) > 1:\n log.debug(\"Name '{}' is ambiguous between {}\".format(\n repr(name),\n ', '.join(\"'{}'\".format(i) for i in ids)\n ))\n # Just return the first\n id_ = ids[0]\n d = name.as_dict()\n d[\"id\"] = id_\n return d\n\n def fresh_id(self, name):\n assert name not in self.name_to_ids, name\n slug, i = slugify(repr(name)), 0\n while slug == \"\" or slug in self.id_to_canonical:\n i += 1\n slug = \"{}{}\".format(slugify(repr(name)), i)\n return slug\n\n def get_papers(self, id_, role=None):\n if role is None:\n return [p for p_list in self.id_to_papers[id_].values() for p in p_list]\n return self.id_to_papers[id_][role]\n\n def get_coauthors(self, id_):\n return self.coauthors[id_].items()\n\n def get_venues(self, vidx: VenueIndex, id_):\n \"\"\"Get a list of venues a person has published in, with counts.\"\"\"\n venues = Counter()\n for paper in self.get_papers(id_):\n for venue in vidx.get_associated_venues(paper):\n venues[venue] += 1\n return venues\n", "path": "bin/anthology/index.py"}]} | 3,857 | 271 |
gh_patches_debug_39205 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Declare dependent dimensions
In Pyro, we can declare dependent dimensions through `.to_event()`. Currently, this is useful for correctly computing log_prob of a transformed distribution where `transform.event_dim>0` but `base_dist.event_shape=()`. In particular, we need this to declare `base_dist` in autoguide.
In Pyro, this is achieved by using Independent class. It seems unnecessary to me. Can we just manipulate `batch_shape`, `event_shape` and correctly compute the returned value for `log_prob`? I think we just need 1 line of code for each distribution
```
def log_prob(...):
...
# event_ndim minus 1 for multivariate distributions
return sum_rightmost(log_prob, len(self.event_shape))
```
If Independent class turns out to be useful, then we will implement it instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpyro/distributions/distribution.py`
Content:
```
1 # The implementation follows the design in PyTorch: torch.distributions.distribution.py
2 #
3 # Copyright (c) 2016- Facebook, Inc (Adam Paszke)
4 # Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
5 # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
6 # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
7 # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
8 # Copyright (c) 2011-2013 NYU (Clement Farabet)
9 # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
10 # Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
11 # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
12 #
13 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
14 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
17 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23 # POSSIBILITY OF SUCH DAMAGE.
24
25 import jax.numpy as np
26
27 from numpyro.distributions.constraints import Transform, is_dependent
28 from numpyro.distributions.util import lazy_property, sum_rightmost
29
30
31 class Distribution(object):
32 """
33 Base class for probability distributions in NumPyro. The design largely
34 follows from :mod:`torch.distributions`.
35
36 :param batch_shape: The batch shape for the distribution. This designates
37 independent (possibly non-identical) dimensions of a sample from the
38 distribution. This is fixed for a distribution instance and is inferred
39 from the shape of the distribution parameters.
40 :param event_shape: The event shape for the distribution. This designates
41 the dependent dimensions of a sample from the distribution. These are
42 collapsed when we evaluate the log probability density of a batch of
43 samples using `.log_prob`.
44 :param validate_args: Whether to enable validation of distribution
45 parameters and arguments to `.log_prob` method.
46
47 As an example:
48
49 .. testsetup::
50
51 import jax.numpy as np
52 import numpyro.distributions as dist
53
54 .. doctest::
55
56 >>> d = dist.Dirichlet(np.ones((2, 3, 4)))
57 >>> d.batch_shape
58 (2, 3)
59 >>> d.event_shape
60 (4,)
61 """
62 arg_constraints = {}
63 support = None
64 reparametrized_params = []
65 _validate_args = False
66
67 def __init__(self, batch_shape=(), event_shape=(), validate_args=None):
68 self._batch_shape = batch_shape
69 self._event_shape = event_shape
70 if validate_args is not None:
71 self._validate_args = validate_args
72 if self._validate_args:
73 for param, constraint in self.arg_constraints.items():
74 if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
75 continue
76 if is_dependent(constraint):
77 continue # skip constraints that cannot be checked
78 if not np.all(constraint(getattr(self, param))):
79 raise ValueError("The parameter {} has invalid values".format(param))
80 super(Distribution, self).__init__()
81
82 @property
83 def batch_shape(self):
84 """
85 Returns the shape over which the distribution parameters are batched.
86
87 :return: batch shape of the distribution.
88 :rtype: tuple
89 """
90 return self._batch_shape
91
92 @property
93 def event_shape(self):
94 """
95 Returns the shape of a single sample from the distribution without
96 batching.
97
98 :return: event shape of the distribution.
99 :rtype: tuple
100 """
101 return self._event_shape
102
103 def sample(self, key, sample_shape=()):
104 """
105 Returns a sample from the distribution having shape given by
106 `sample_shape + batch_shape + event_shape`. Note that when `sample_shape` is non-empty,
107 leading dimensions (of size `sample_shape`) of the returned sample will
108 be filled with iid draws from the distribution instance.
109
110 :param jax.random.PRNGKey key: the rng key to be used for the distribution.
111 :param tuple sample_shape: the sample shape for the distribution.
112 :return: an array of shape `sample_shape + batch_shape + event_shape`
113 :rtype: numpy.ndarray
114 """
115 raise NotImplementedError
116
117 def sample_with_intermediates(self, key, sample_shape=()):
118 """
119 Same as ``sample`` except that any intermediate computations are
120 returned (useful for `TransformedDistribution`).
121
122 :param jax.random.PRNGKey key: the rng key to be used for the distribution.
123 :param tuple sample_shape: the sample shape for the distribution.
124 :return: an array of shape `sample_shape + batch_shape + event_shape`
125 :rtype: numpy.ndarray
126 """
127 return self.sample(key, sample_shape=sample_shape), []
128
129 def transform_with_intermediates(self, base_value):
130 return base_value, []
131
132 def log_prob(self, value):
133 """
134 Evaluates the log probability density for a batch of samples given by
135 `value`.
136
137 :param value: A batch of samples from the distribution.
138 :return: an array with shape `value.shape[:-self.event_shape]`
139 :rtype: numpy.ndarray
140 """
141 raise NotImplementedError
142
143 @property
144 def mean(self):
145 """
146 Mean of the distribution.
147 """
148 raise NotImplementedError
149
150 @property
151 def variance(self):
152 """
153 Variance of the distribution.
154 """
155 raise NotImplementedError
156
157 def _validate_sample(self, value):
158 if not np.all(self.support(value)):
159 raise ValueError('Invalid values provided to log prob method. '
160 'The value argument must be within the support.')
161
162 def __call__(self, *args, **kwargs):
163 key = kwargs.pop('random_state')
164 sample_intermediates = kwargs.pop('sample_intermediates', False)
165 if sample_intermediates:
166 return self.sample_with_intermediates(key, *args, **kwargs)
167 return self.sample(key, *args, **kwargs)
168
169
170 class TransformedDistribution(Distribution):
171 """
172 Returns a distribution instance obtained as a result of applying
173 a sequence of transforms to a base distribution. For an example,
174 see :class:`~numpyro.distributions.LogNormal` and
175 :class:`~numpyro.distributions.HalfNormal`.
176
177 :param base_distribution: the base distribution over which to apply transforms.
178 :param transforms: a single transform or a list of transforms.
179 :param validate_args: Whether to enable validation of distribution
180 parameters and arguments to `.log_prob` method.
181 """
182 arg_constraints = {}
183
184 def __init__(self, base_distribution, transforms, validate_args=None):
185 if isinstance(transforms, Transform):
186 transforms = [transforms, ]
187 elif isinstance(transforms, list):
188 if not all(isinstance(t, Transform) for t in transforms):
189 raise ValueError("transforms must be a Transform or a list of Transforms")
190 else:
191 raise ValueError("transforms must be a Transform or list, but was {}".format(transforms))
192 # XXX: this logic will not be valid when IndependentDistribution is support;
193 # in that case, it is more involved to support Transform(Indep(Transform));
194 # however, we might not need to support such kind of distribution
195 # and should raise an error if base_distribution is an Indep one
196 if isinstance(base_distribution, TransformedDistribution):
197 self.base_dist = base_distribution.base_dist
198 self.transforms = base_distribution.transforms + transforms
199 else:
200 self.base_dist = base_distribution
201 self.transforms = transforms
202 shape = base_distribution.batch_shape + base_distribution.event_shape
203 event_dim = max([len(base_distribution.event_shape)] + [t.event_dim for t in transforms])
204 batch_shape = shape[:len(shape) - event_dim]
205 event_shape = shape[len(shape) - event_dim:]
206 super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)
207
208 @property
209 def support(self):
210 domain = self.base_dist.support
211 for t in self.transforms:
212 t.domain = domain
213 domain = t.codomain
214 return domain
215
216 def sample(self, key, sample_shape=()):
217 x = self.base_dist.sample(key, sample_shape)
218 for transform in self.transforms:
219 x = transform(x)
220 return x
221
222 def sample_with_intermediates(self, key, sample_shape=()):
223 base_value = self.base_dist.sample(key, sample_shape)
224 return self.transform_with_intermediates(base_value)
225
226 def transform_with_intermediates(self, base_value):
227 x = base_value
228 intermediates = []
229 for transform in self.transforms:
230 x_tmp = x
231 x, t_inter = transform.call_with_intermediates(x)
232 intermediates.append([x_tmp, t_inter])
233 return x, intermediates
234
235 def log_prob(self, value, intermediates=None):
236 if self._validate_args:
237 self._validate_sample(value)
238 if intermediates is not None:
239 if len(intermediates) != len(self.transforms):
240 raise ValueError('Intermediates array has length = {}. Expected = {}.'
241 .format(len(intermediates), len(self.transforms)))
242 event_dim = len(self.event_shape)
243 log_prob = 0.0
244 y = value
245 for i, transform in enumerate(reversed(self.transforms)):
246 x = transform.inv(y) if intermediates is None else intermediates[-i - 1][0]
247 t_inter = None if intermediates is None else intermediates[-i - 1][1]
248 t_log_det = transform.log_abs_det_jacobian(x, y, t_inter)
249 log_prob = log_prob - sum_rightmost(t_log_det, event_dim - transform.event_dim)
250 y = x
251
252 log_prob = log_prob + sum_rightmost(self.base_dist.log_prob(y),
253 event_dim - len(self.base_dist.event_shape))
254 return log_prob
255
256 @property
257 def mean(self):
258 raise NotImplementedError
259
260 @property
261 def variance(self):
262 raise NotImplementedError
263
```
Path: `numpyro/distributions/__init__.py`
Content:
```
1 from numpyro.distributions.continuous import (
2 Beta,
3 Cauchy,
4 Chi2,
5 Dirichlet,
6 Exponential,
7 Gamma,
8 GaussianRandomWalk,
9 HalfCauchy,
10 HalfNormal,
11 InverseGamma,
12 LKJCholesky,
13 LogNormal,
14 MultivariateNormal,
15 Normal,
16 Pareto,
17 StudentT,
18 TruncatedCauchy,
19 TruncatedNormal,
20 Uniform
21 )
22 from numpyro.distributions.discrete import (
23 Bernoulli,
24 BernoulliLogits,
25 BernoulliProbs,
26 Binomial,
27 BinomialLogits,
28 BinomialProbs,
29 Categorical,
30 CategoricalLogits,
31 CategoricalProbs,
32 Delta,
33 Multinomial,
34 MultinomialLogits,
35 MultinomialProbs,
36 Poisson,
37 PRNGIdentity
38 )
39 from numpyro.distributions.distribution import Distribution, TransformedDistribution
40
41 __all__ = [
42 'Bernoulli',
43 'BernoulliLogits',
44 'BernoulliProbs',
45 'Beta',
46 'Binomial',
47 'BinomialLogits',
48 'BinomialProbs',
49 'Categorical',
50 'CategoricalLogits',
51 'CategoricalProbs',
52 'Cauchy',
53 'Chi2',
54 'Delta',
55 'Dirichlet',
56 'Distribution',
57 'Exponential',
58 'Gamma',
59 'GaussianRandomWalk',
60 'HalfCauchy',
61 'HalfNormal',
62 'InverseGamma',
63 'LKJCholesky',
64 'LogNormal',
65 'Multinomial',
66 'MultinomialLogits',
67 'MultinomialProbs',
68 'MultivariateNormal',
69 'Normal',
70 'Pareto',
71 'Poisson',
72 'PRNGIdentity',
73 'StudentT',
74 'TransformedDistribution',
75 'TruncatedCauchy',
76 'TruncatedNormal',
77 'Uniform',
78 ]
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpyro/distributions/__init__.py b/numpyro/distributions/__init__.py
--- a/numpyro/distributions/__init__.py
+++ b/numpyro/distributions/__init__.py
@@ -36,7 +36,7 @@
Poisson,
PRNGIdentity
)
-from numpyro.distributions.distribution import Distribution, TransformedDistribution
+from numpyro.distributions.distribution import Distribution, Independent, TransformedDistribution
__all__ = [
'Bernoulli',
@@ -59,6 +59,7 @@
'GaussianRandomWalk',
'HalfCauchy',
'HalfNormal',
+ 'Independent',
'InverseGamma',
'LKJCholesky',
'LogNormal',
diff --git a/numpyro/distributions/distribution.py b/numpyro/distributions/distribution.py
--- a/numpyro/distributions/distribution.py
+++ b/numpyro/distributions/distribution.py
@@ -166,6 +166,75 @@
return self.sample_with_intermediates(key, *args, **kwargs)
return self.sample(key, *args, **kwargs)
+ def to_event(self, reinterpreted_batch_ndims=None):
+ if reinterpreted_batch_ndims is None:
+ reinterpreted_batch_ndims = len(self.batch_shape)
+ return Independent(self, reinterpreted_batch_ndims)
+
+
+class Independent(Distribution):
+ """
+ Reinterprets batch dimensions of a distribution as event dims by shifting
+ the batch-event dim boundary further to the left.
+
+ From a practical standpoint, this is useful when changing the result of
+ :meth:`log_prob`. For example, a univariate Normal distribution can be
+ interpreted as a multivariate Normal with diagonal covariance::
+
+ .. testsetup::
+
+ import numpyro.distributions as dist
+
+ .. doctest::
+
+ >>> normal = dist.Normal(np.zeros(3), np.ones(3))
+ >>> [normal.batch_shape, normal.event_shape]
+ [torch.Size((3,)), torch.Size(())]
+ >>> diag_normal = Independent(normal, 1)
+ >>> [diag_normal.batch_shape, diag_normal.event_shape]
+ [torch.Size(()), torch.Size((3,))]
+
+ :param numpyro.distribution.Distribution base_distribution: a distribution instance.
+ :param int reinterpreted_batch_ndims: the number of batch dims to reinterpret as event dims.
+ """
+ arg_constraints = {}
+
+ def __init__(self, base_dist, reinterpreted_batch_ndims, validate_args=None):
+ if reinterpreted_batch_ndims > len(base_dist.batch_shape):
+ raise ValueError("Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
+ "actual {} vs {}".format(reinterpreted_batch_ndims,
+ len(base_dist.batch_shape)))
+ shape = base_dist.batch_shape + base_dist.event_shape
+ event_dim = reinterpreted_batch_ndims + len(base_dist.event_shape)
+ batch_shape = shape[:len(shape) - event_dim]
+ event_shape = shape[len(shape) - event_dim:]
+ self.base_dist = base_dist
+ self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
+ super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+ @property
+ def support(self):
+ return self.base_dist.support
+
+ @property
+ def reparameterized_params(self):
+ return self.base_dist.reparameterized_params
+
+ @property
+ def mean(self):
+ return self.base_dist.mean
+
+ @property
+ def variance(self):
+ return self.base_dist.variance
+
+ def sample(self, key, sample_shape=()):
+ return self.base_dist.sample(key, sample_shape=sample_shape)
+
+ def log_prob(self, value):
+ log_prob = self.base_dist.log_prob(value)
+ return sum_rightmost(log_prob, self.reinterpreted_batch_ndims)
+
class TransformedDistribution(Distribution):
"""
| {"golden_diff": "diff --git a/numpyro/distributions/__init__.py b/numpyro/distributions/__init__.py\n--- a/numpyro/distributions/__init__.py\n+++ b/numpyro/distributions/__init__.py\n@@ -36,7 +36,7 @@\n Poisson,\n PRNGIdentity\n )\n-from numpyro.distributions.distribution import Distribution, TransformedDistribution\n+from numpyro.distributions.distribution import Distribution, Independent, TransformedDistribution\n \n __all__ = [\n 'Bernoulli',\n@@ -59,6 +59,7 @@\n 'GaussianRandomWalk',\n 'HalfCauchy',\n 'HalfNormal',\n+ 'Independent',\n 'InverseGamma',\n 'LKJCholesky',\n 'LogNormal',\ndiff --git a/numpyro/distributions/distribution.py b/numpyro/distributions/distribution.py\n--- a/numpyro/distributions/distribution.py\n+++ b/numpyro/distributions/distribution.py\n@@ -166,6 +166,75 @@\n return self.sample_with_intermediates(key, *args, **kwargs)\n return self.sample(key, *args, **kwargs)\n \n+ def to_event(self, reinterpreted_batch_ndims=None):\n+ if reinterpreted_batch_ndims is None:\n+ reinterpreted_batch_ndims = len(self.batch_shape)\n+ return Independent(self, reinterpreted_batch_ndims)\n+\n+\n+class Independent(Distribution):\n+ \"\"\"\n+ Reinterprets batch dimensions of a distribution as event dims by shifting\n+ the batch-event dim boundary further to the left.\n+\n+ From a practical standpoint, this is useful when changing the result of\n+ :meth:`log_prob`. For example, a univariate Normal distribution can be\n+ interpreted as a multivariate Normal with diagonal covariance::\n+\n+ .. testsetup::\n+\n+ import numpyro.distributions as dist\n+\n+ .. doctest::\n+\n+ >>> normal = dist.Normal(np.zeros(3), np.ones(3))\n+ >>> [normal.batch_shape, normal.event_shape]\n+ [torch.Size((3,)), torch.Size(())]\n+ >>> diag_normal = Independent(normal, 1)\n+ >>> [diag_normal.batch_shape, diag_normal.event_shape]\n+ [torch.Size(()), torch.Size((3,))]\n+\n+ :param numpyro.distribution.Distribution base_distribution: a distribution instance.\n+ :param int reinterpreted_batch_ndims: the number of batch dims to reinterpret as event dims.\n+ \"\"\"\n+ arg_constraints = {}\n+\n+ def __init__(self, base_dist, reinterpreted_batch_ndims, validate_args=None):\n+ if reinterpreted_batch_ndims > len(base_dist.batch_shape):\n+ raise ValueError(\"Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), \"\n+ \"actual {} vs {}\".format(reinterpreted_batch_ndims,\n+ len(base_dist.batch_shape)))\n+ shape = base_dist.batch_shape + base_dist.event_shape\n+ event_dim = reinterpreted_batch_ndims + len(base_dist.event_shape)\n+ batch_shape = shape[:len(shape) - event_dim]\n+ event_shape = shape[len(shape) - event_dim:]\n+ self.base_dist = base_dist\n+ self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n+ super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n+\n+ @property\n+ def support(self):\n+ return self.base_dist.support\n+\n+ @property\n+ def reparameterized_params(self):\n+ return self.base_dist.reparameterized_params\n+\n+ @property\n+ def mean(self):\n+ return self.base_dist.mean\n+\n+ @property\n+ def variance(self):\n+ return self.base_dist.variance\n+\n+ def sample(self, key, sample_shape=()):\n+ return self.base_dist.sample(key, sample_shape=sample_shape)\n+\n+ def log_prob(self, value):\n+ log_prob = self.base_dist.log_prob(value)\n+ return sum_rightmost(log_prob, self.reinterpreted_batch_ndims)\n+\n \n class TransformedDistribution(Distribution):\n \"\"\"\n", "issue": "Declare dependent dimensions\nIn Pyro, we can declare dependent dimensions 
through `.to_event()`. Currently, this is useful for correctly computing log_prob of a transformed distribution where `transform.event_dim>0` but `base_dist.event_shape=()`. In particular, we need this to declare `base_dist` in autoguide.\r\n\r\nIn Pyro, this is achieved by using Independent class. It seems unnecessary to me. Can we just manipulate `batch_shape`, `event_shape` and correctly compute the returned value for `log_prob`? I think we just need 1 line of code for each distribution\r\n```\r\ndef log_prob(...):\r\n ...\r\n # event_ndim minus 1 for multivariate distributions\r\n return sum_rightmost(log_prob, len(self.event_shape))\r\n```\r\n\r\nIf Independent class turns out to be useful, then we will implement it instead.\n", "before_files": [{"content": "# The implementation follows the design in PyTorch: torch.distributions.distribution.py\n#\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport jax.numpy as np\n\nfrom numpyro.distributions.constraints import Transform, is_dependent\nfrom numpyro.distributions.util import lazy_property, sum_rightmost\n\n\nclass Distribution(object):\n \"\"\"\n Base class for probability distributions in NumPyro. The design largely\n follows from :mod:`torch.distributions`.\n\n :param batch_shape: The batch shape for the distribution. This designates\n independent (possibly non-identical) dimensions of a sample from the\n distribution. This is fixed for a distribution instance and is inferred\n from the shape of the distribution parameters.\n :param event_shape: The event shape for the distribution. This designates\n the dependent dimensions of a sample from the distribution. These are\n collapsed when we evaluate the log probability density of a batch of\n samples using `.log_prob`.\n :param validate_args: Whether to enable validation of distribution\n parameters and arguments to `.log_prob` method.\n\n As an example:\n\n .. testsetup::\n\n import jax.numpy as np\n import numpyro.distributions as dist\n\n .. 
doctest::\n\n >>> d = dist.Dirichlet(np.ones((2, 3, 4)))\n >>> d.batch_shape\n (2, 3)\n >>> d.event_shape\n (4,)\n \"\"\"\n arg_constraints = {}\n support = None\n reparametrized_params = []\n _validate_args = False\n\n def __init__(self, batch_shape=(), event_shape=(), validate_args=None):\n self._batch_shape = batch_shape\n self._event_shape = event_shape\n if validate_args is not None:\n self._validate_args = validate_args\n if self._validate_args:\n for param, constraint in self.arg_constraints.items():\n if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):\n continue\n if is_dependent(constraint):\n continue # skip constraints that cannot be checked\n if not np.all(constraint(getattr(self, param))):\n raise ValueError(\"The parameter {} has invalid values\".format(param))\n super(Distribution, self).__init__()\n\n @property\n def batch_shape(self):\n \"\"\"\n Returns the shape over which the distribution parameters are batched.\n\n :return: batch shape of the distribution.\n :rtype: tuple\n \"\"\"\n return self._batch_shape\n\n @property\n def event_shape(self):\n \"\"\"\n Returns the shape of a single sample from the distribution without\n batching.\n\n :return: event shape of the distribution.\n :rtype: tuple\n \"\"\"\n return self._event_shape\n\n def sample(self, key, sample_shape=()):\n \"\"\"\n Returns a sample from the distribution having shape given by\n `sample_shape + batch_shape + event_shape`. Note that when `sample_shape` is non-empty,\n leading dimensions (of size `sample_shape`) of the returned sample will\n be filled with iid draws from the distribution instance.\n\n :param jax.random.PRNGKey key: the rng key to be used for the distribution.\n :param tuple sample_shape: the sample shape for the distribution.\n :return: an array of shape `sample_shape + batch_shape + event_shape`\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n def sample_with_intermediates(self, key, sample_shape=()):\n \"\"\"\n Same as ``sample`` except that any intermediate computations are\n returned (useful for `TransformedDistribution`).\n\n :param jax.random.PRNGKey key: the rng key to be used for the distribution.\n :param tuple sample_shape: the sample shape for the distribution.\n :return: an array of shape `sample_shape + batch_shape + event_shape`\n :rtype: numpy.ndarray\n \"\"\"\n return self.sample(key, sample_shape=sample_shape), []\n\n def transform_with_intermediates(self, base_value):\n return base_value, []\n\n def log_prob(self, value):\n \"\"\"\n Evaluates the log probability density for a batch of samples given by\n `value`.\n\n :param value: A batch of samples from the distribution.\n :return: an array with shape `value.shape[:-self.event_shape]`\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n @property\n def mean(self):\n \"\"\"\n Mean of the distribution.\n \"\"\"\n raise NotImplementedError\n\n @property\n def variance(self):\n \"\"\"\n Variance of the distribution.\n \"\"\"\n raise NotImplementedError\n\n def _validate_sample(self, value):\n if not np.all(self.support(value)):\n raise ValueError('Invalid values provided to log prob method. 
'\n 'The value argument must be within the support.')\n\n def __call__(self, *args, **kwargs):\n key = kwargs.pop('random_state')\n sample_intermediates = kwargs.pop('sample_intermediates', False)\n if sample_intermediates:\n return self.sample_with_intermediates(key, *args, **kwargs)\n return self.sample(key, *args, **kwargs)\n\n\nclass TransformedDistribution(Distribution):\n \"\"\"\n Returns a distribution instance obtained as a result of applying\n a sequence of transforms to a base distribution. For an example,\n see :class:`~numpyro.distributions.LogNormal` and\n :class:`~numpyro.distributions.HalfNormal`.\n\n :param base_distribution: the base distribution over which to apply transforms.\n :param transforms: a single transform or a list of transforms.\n :param validate_args: Whether to enable validation of distribution\n parameters and arguments to `.log_prob` method.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_distribution, transforms, validate_args=None):\n if isinstance(transforms, Transform):\n transforms = [transforms, ]\n elif isinstance(transforms, list):\n if not all(isinstance(t, Transform) for t in transforms):\n raise ValueError(\"transforms must be a Transform or a list of Transforms\")\n else:\n raise ValueError(\"transforms must be a Transform or list, but was {}\".format(transforms))\n # XXX: this logic will not be valid when IndependentDistribution is support;\n # in that case, it is more involved to support Transform(Indep(Transform));\n # however, we might not need to support such kind of distribution\n # and should raise an error if base_distribution is an Indep one\n if isinstance(base_distribution, TransformedDistribution):\n self.base_dist = base_distribution.base_dist\n self.transforms = base_distribution.transforms + transforms\n else:\n self.base_dist = base_distribution\n self.transforms = transforms\n shape = base_distribution.batch_shape + base_distribution.event_shape\n event_dim = max([len(base_distribution.event_shape)] + [t.event_dim for t in transforms])\n batch_shape = shape[:len(shape) - event_dim]\n event_shape = shape[len(shape) - event_dim:]\n super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @property\n def support(self):\n domain = self.base_dist.support\n for t in self.transforms:\n t.domain = domain\n domain = t.codomain\n return domain\n\n def sample(self, key, sample_shape=()):\n x = self.base_dist.sample(key, sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x\n\n def sample_with_intermediates(self, key, sample_shape=()):\n base_value = self.base_dist.sample(key, sample_shape)\n return self.transform_with_intermediates(base_value)\n\n def transform_with_intermediates(self, base_value):\n x = base_value\n intermediates = []\n for transform in self.transforms:\n x_tmp = x\n x, t_inter = transform.call_with_intermediates(x)\n intermediates.append([x_tmp, t_inter])\n return x, intermediates\n\n def log_prob(self, value, intermediates=None):\n if self._validate_args:\n self._validate_sample(value)\n if intermediates is not None:\n if len(intermediates) != len(self.transforms):\n raise ValueError('Intermediates array has length = {}. 
Expected = {}.'\n .format(len(intermediates), len(self.transforms)))\n event_dim = len(self.event_shape)\n log_prob = 0.0\n y = value\n for i, transform in enumerate(reversed(self.transforms)):\n x = transform.inv(y) if intermediates is None else intermediates[-i - 1][0]\n t_inter = None if intermediates is None else intermediates[-i - 1][1]\n t_log_det = transform.log_abs_det_jacobian(x, y, t_inter)\n log_prob = log_prob - sum_rightmost(t_log_det, event_dim - transform.event_dim)\n y = x\n\n log_prob = log_prob + sum_rightmost(self.base_dist.log_prob(y),\n event_dim - len(self.base_dist.event_shape))\n return log_prob\n\n @property\n def mean(self):\n raise NotImplementedError\n\n @property\n def variance(self):\n raise NotImplementedError\n", "path": "numpyro/distributions/distribution.py"}, {"content": "from numpyro.distributions.continuous import (\n Beta,\n Cauchy,\n Chi2,\n Dirichlet,\n Exponential,\n Gamma,\n GaussianRandomWalk,\n HalfCauchy,\n HalfNormal,\n InverseGamma,\n LKJCholesky,\n LogNormal,\n MultivariateNormal,\n Normal,\n Pareto,\n StudentT,\n TruncatedCauchy,\n TruncatedNormal,\n Uniform\n)\nfrom numpyro.distributions.discrete import (\n Bernoulli,\n BernoulliLogits,\n BernoulliProbs,\n Binomial,\n BinomialLogits,\n BinomialProbs,\n Categorical,\n CategoricalLogits,\n CategoricalProbs,\n Delta,\n Multinomial,\n MultinomialLogits,\n MultinomialProbs,\n Poisson,\n PRNGIdentity\n)\nfrom numpyro.distributions.distribution import Distribution, TransformedDistribution\n\n__all__ = [\n 'Bernoulli',\n 'BernoulliLogits',\n 'BernoulliProbs',\n 'Beta',\n 'Binomial',\n 'BinomialLogits',\n 'BinomialProbs',\n 'Categorical',\n 'CategoricalLogits',\n 'CategoricalProbs',\n 'Cauchy',\n 'Chi2',\n 'Delta',\n 'Dirichlet',\n 'Distribution',\n 'Exponential',\n 'Gamma',\n 'GaussianRandomWalk',\n 'HalfCauchy',\n 'HalfNormal',\n 'InverseGamma',\n 'LKJCholesky',\n 'LogNormal',\n 'Multinomial',\n 'MultinomialLogits',\n 'MultinomialProbs',\n 'MultivariateNormal',\n 'Normal',\n 'Pareto',\n 'Poisson',\n 'PRNGIdentity',\n 'StudentT',\n 'TransformedDistribution',\n 'TruncatedCauchy',\n 'TruncatedNormal',\n 'Uniform',\n]\n", "path": "numpyro/distributions/__init__.py"}], "after_files": [{"content": "# The implementation follows the design in PyTorch: torch.distributions.distribution.py\n#\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport jax.numpy as np\n\nfrom numpyro.distributions.constraints import Transform, is_dependent\nfrom numpyro.distributions.util import lazy_property, sum_rightmost\n\n\nclass Distribution(object):\n \"\"\"\n Base class for probability distributions in NumPyro. The design largely\n follows from :mod:`torch.distributions`.\n\n :param batch_shape: The batch shape for the distribution. This designates\n independent (possibly non-identical) dimensions of a sample from the\n distribution. This is fixed for a distribution instance and is inferred\n from the shape of the distribution parameters.\n :param event_shape: The event shape for the distribution. This designates\n the dependent dimensions of a sample from the distribution. These are\n collapsed when we evaluate the log probability density of a batch of\n samples using `.log_prob`.\n :param validate_args: Whether to enable validation of distribution\n parameters and arguments to `.log_prob` method.\n\n As an example:\n\n .. testsetup::\n\n import jax.numpy as np\n import numpyro.distributions as dist\n\n .. doctest::\n\n >>> d = dist.Dirichlet(np.ones((2, 3, 4)))\n >>> d.batch_shape\n (2, 3)\n >>> d.event_shape\n (4,)\n \"\"\"\n arg_constraints = {}\n support = None\n reparametrized_params = []\n _validate_args = False\n\n def __init__(self, batch_shape=(), event_shape=(), validate_args=None):\n self._batch_shape = batch_shape\n self._event_shape = event_shape\n if validate_args is not None:\n self._validate_args = validate_args\n if self._validate_args:\n for param, constraint in self.arg_constraints.items():\n if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):\n continue\n if is_dependent(constraint):\n continue # skip constraints that cannot be checked\n if not np.all(constraint(getattr(self, param))):\n raise ValueError(\"The parameter {} has invalid values\".format(param))\n super(Distribution, self).__init__()\n\n @property\n def batch_shape(self):\n \"\"\"\n Returns the shape over which the distribution parameters are batched.\n\n :return: batch shape of the distribution.\n :rtype: tuple\n \"\"\"\n return self._batch_shape\n\n @property\n def event_shape(self):\n \"\"\"\n Returns the shape of a single sample from the distribution without\n batching.\n\n :return: event shape of the distribution.\n :rtype: tuple\n \"\"\"\n return self._event_shape\n\n def sample(self, key, sample_shape=()):\n \"\"\"\n Returns a sample from the distribution having shape given by\n `sample_shape + batch_shape + event_shape`. 
Note that when `sample_shape` is non-empty,\n leading dimensions (of size `sample_shape`) of the returned sample will\n be filled with iid draws from the distribution instance.\n\n :param jax.random.PRNGKey key: the rng key to be used for the distribution.\n :param tuple sample_shape: the sample shape for the distribution.\n :return: an array of shape `sample_shape + batch_shape + event_shape`\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n def sample_with_intermediates(self, key, sample_shape=()):\n \"\"\"\n Same as ``sample`` except that any intermediate computations are\n returned (useful for `TransformedDistribution`).\n\n :param jax.random.PRNGKey key: the rng key to be used for the distribution.\n :param tuple sample_shape: the sample shape for the distribution.\n :return: an array of shape `sample_shape + batch_shape + event_shape`\n :rtype: numpy.ndarray\n \"\"\"\n return self.sample(key, sample_shape=sample_shape), []\n\n def transform_with_intermediates(self, base_value):\n return base_value, []\n\n def log_prob(self, value):\n \"\"\"\n Evaluates the log probability density for a batch of samples given by\n `value`.\n\n :param value: A batch of samples from the distribution.\n :return: an array with shape `value.shape[:-self.event_shape]`\n :rtype: numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n @property\n def mean(self):\n \"\"\"\n Mean of the distribution.\n \"\"\"\n raise NotImplementedError\n\n @property\n def variance(self):\n \"\"\"\n Variance of the distribution.\n \"\"\"\n raise NotImplementedError\n\n def _validate_sample(self, value):\n if not np.all(self.support(value)):\n raise ValueError('Invalid values provided to log prob method. '\n 'The value argument must be within the support.')\n\n def __call__(self, *args, **kwargs):\n key = kwargs.pop('random_state')\n sample_intermediates = kwargs.pop('sample_intermediates', False)\n if sample_intermediates:\n return self.sample_with_intermediates(key, *args, **kwargs)\n return self.sample(key, *args, **kwargs)\n\n def to_event(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n return Independent(self, reinterpreted_batch_ndims)\n\n\nclass Independent(Distribution):\n \"\"\"\n Reinterprets batch dimensions of a distribution as event dims by shifting\n the batch-event dim boundary further to the left.\n\n From a practical standpoint, this is useful when changing the result of\n :meth:`log_prob`. For example, a univariate Normal distribution can be\n interpreted as a multivariate Normal with diagonal covariance::\n\n .. testsetup::\n\n import numpyro.distributions as dist\n\n .. 
doctest::\n\n >>> normal = dist.Normal(np.zeros(3), np.ones(3))\n >>> [normal.batch_shape, normal.event_shape]\n [torch.Size((3,)), torch.Size(())]\n >>> diag_normal = Independent(normal, 1)\n >>> [diag_normal.batch_shape, diag_normal.event_shape]\n [torch.Size(()), torch.Size((3,))]\n\n :param numpyro.distribution.Distribution base_distribution: a distribution instance.\n :param int reinterpreted_batch_ndims: the number of batch dims to reinterpret as event dims.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, reinterpreted_batch_ndims, validate_args=None):\n if reinterpreted_batch_ndims > len(base_dist.batch_shape):\n raise ValueError(\"Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), \"\n \"actual {} vs {}\".format(reinterpreted_batch_ndims,\n len(base_dist.batch_shape)))\n shape = base_dist.batch_shape + base_dist.event_shape\n event_dim = reinterpreted_batch_ndims + len(base_dist.event_shape)\n batch_shape = shape[:len(shape) - event_dim]\n event_shape = shape[len(shape) - event_dim:]\n self.base_dist = base_dist\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @property\n def support(self):\n return self.base_dist.support\n\n @property\n def reparameterized_params(self):\n return self.base_dist.reparameterized_params\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n\n def sample(self, key, sample_shape=()):\n return self.base_dist.sample(key, sample_shape=sample_shape)\n\n def log_prob(self, value):\n log_prob = self.base_dist.log_prob(value)\n return sum_rightmost(log_prob, self.reinterpreted_batch_ndims)\n\n\nclass TransformedDistribution(Distribution):\n \"\"\"\n Returns a distribution instance obtained as a result of applying\n a sequence of transforms to a base distribution. 
For an example,\n see :class:`~numpyro.distributions.LogNormal` and\n :class:`~numpyro.distributions.HalfNormal`.\n\n :param base_distribution: the base distribution over which to apply transforms.\n :param transforms: a single transform or a list of transforms.\n :param validate_args: Whether to enable validation of distribution\n parameters and arguments to `.log_prob` method.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_distribution, transforms, validate_args=None):\n if isinstance(transforms, Transform):\n transforms = [transforms, ]\n elif isinstance(transforms, list):\n if not all(isinstance(t, Transform) for t in transforms):\n raise ValueError(\"transforms must be a Transform or a list of Transforms\")\n else:\n raise ValueError(\"transforms must be a Transform or list, but was {}\".format(transforms))\n # XXX: this logic will not be valid when IndependentDistribution is support;\n # in that case, it is more involved to support Transform(Indep(Transform));\n # however, we might not need to support such kind of distribution\n # and should raise an error if base_distribution is an Indep one\n if isinstance(base_distribution, TransformedDistribution):\n self.base_dist = base_distribution.base_dist\n self.transforms = base_distribution.transforms + transforms\n else:\n self.base_dist = base_distribution\n self.transforms = transforms\n shape = base_distribution.batch_shape + base_distribution.event_shape\n event_dim = max([len(base_distribution.event_shape)] + [t.event_dim for t in transforms])\n batch_shape = shape[:len(shape) - event_dim]\n event_shape = shape[len(shape) - event_dim:]\n super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @property\n def support(self):\n domain = self.base_dist.support\n for t in self.transforms:\n t.domain = domain\n domain = t.codomain\n return domain\n\n def sample(self, key, sample_shape=()):\n x = self.base_dist.sample(key, sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x\n\n def sample_with_intermediates(self, key, sample_shape=()):\n base_value = self.base_dist.sample(key, sample_shape)\n return self.transform_with_intermediates(base_value)\n\n def transform_with_intermediates(self, base_value):\n x = base_value\n intermediates = []\n for transform in self.transforms:\n x_tmp = x\n x, t_inter = transform.call_with_intermediates(x)\n intermediates.append([x_tmp, t_inter])\n return x, intermediates\n\n def log_prob(self, value, intermediates=None):\n if self._validate_args:\n self._validate_sample(value)\n if intermediates is not None:\n if len(intermediates) != len(self.transforms):\n raise ValueError('Intermediates array has length = {}. 
Expected = {}.'\n .format(len(intermediates), len(self.transforms)))\n event_dim = len(self.event_shape)\n log_prob = 0.0\n y = value\n for i, transform in enumerate(reversed(self.transforms)):\n x = transform.inv(y) if intermediates is None else intermediates[-i - 1][0]\n t_inter = None if intermediates is None else intermediates[-i - 1][1]\n t_log_det = transform.log_abs_det_jacobian(x, y, t_inter)\n log_prob = log_prob - sum_rightmost(t_log_det, event_dim - transform.event_dim)\n y = x\n\n log_prob = log_prob + sum_rightmost(self.base_dist.log_prob(y),\n event_dim - len(self.base_dist.event_shape))\n return log_prob\n\n @property\n def mean(self):\n raise NotImplementedError\n\n @property\n def variance(self):\n raise NotImplementedError\n", "path": "numpyro/distributions/distribution.py"}, {"content": "from numpyro.distributions.continuous import (\n Beta,\n Cauchy,\n Chi2,\n Dirichlet,\n Exponential,\n Gamma,\n GaussianRandomWalk,\n HalfCauchy,\n HalfNormal,\n InverseGamma,\n LKJCholesky,\n LogNormal,\n MultivariateNormal,\n Normal,\n Pareto,\n StudentT,\n TruncatedCauchy,\n TruncatedNormal,\n Uniform\n)\nfrom numpyro.distributions.discrete import (\n Bernoulli,\n BernoulliLogits,\n BernoulliProbs,\n Binomial,\n BinomialLogits,\n BinomialProbs,\n Categorical,\n CategoricalLogits,\n CategoricalProbs,\n Delta,\n Multinomial,\n MultinomialLogits,\n MultinomialProbs,\n Poisson,\n PRNGIdentity\n)\nfrom numpyro.distributions.distribution import Distribution, Independent, TransformedDistribution\n\n__all__ = [\n 'Bernoulli',\n 'BernoulliLogits',\n 'BernoulliProbs',\n 'Beta',\n 'Binomial',\n 'BinomialLogits',\n 'BinomialProbs',\n 'Categorical',\n 'CategoricalLogits',\n 'CategoricalProbs',\n 'Cauchy',\n 'Chi2',\n 'Delta',\n 'Dirichlet',\n 'Distribution',\n 'Exponential',\n 'Gamma',\n 'GaussianRandomWalk',\n 'HalfCauchy',\n 'HalfNormal',\n 'Independent',\n 'InverseGamma',\n 'LKJCholesky',\n 'LogNormal',\n 'Multinomial',\n 'MultinomialLogits',\n 'MultinomialProbs',\n 'MultivariateNormal',\n 'Normal',\n 'Pareto',\n 'Poisson',\n 'PRNGIdentity',\n 'StudentT',\n 'TransformedDistribution',\n 'TruncatedCauchy',\n 'TruncatedNormal',\n 'Uniform',\n]\n", "path": "numpyro/distributions/__init__.py"}]} | 4,089 | 894 |
gh_patches_debug_15758 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3133 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mail_password form crashes if user is not found
Visiting /mail_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/login/mail_password.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from AccessControl import Unauthorized
3 from Products.CMFPlone import PloneMessageFactory as _
4 from Products.Five import BrowserView
5 from Products.statusmessages.interfaces import IStatusMessage
6
7
8 class MailPasswordView(BrowserView):
9
10 def __call__(self):
11 try:
12 response = self.context.portal_registration.mailPassword(
13 self.request.form.get('userid', ''),
14 self.request,
15 )
16 except ValueError as e:
17 try:
18 msg = _(str(e))
19 except Unauthorized:
20 # If we are not allowed to tell the user, what is wrong, he
21 # should get an error message and contact the admins
22 raise e
23 IStatusMessage(self.request).add(msg)
24 self.request.response.redirect(
25 self.context.absolute_url() + 'mail_password_form'
26 )
27 return response
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py
--- a/Products/CMFPlone/browser/login/mail_password.py
+++ b/Products/CMFPlone/browser/login/mail_password.py
@@ -8,6 +8,7 @@
class MailPasswordView(BrowserView):
def __call__(self):
+ response = None
try:
response = self.context.portal_registration.mailPassword(
self.request.form.get('userid', ''),
@@ -22,6 +23,6 @@
raise e
IStatusMessage(self.request).add(msg)
self.request.response.redirect(
- self.context.absolute_url() + 'mail_password_form'
+ self.context.absolute_url() + '/mail_password_form'
)
return response
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py\n--- a/Products/CMFPlone/browser/login/mail_password.py\n+++ b/Products/CMFPlone/browser/login/mail_password.py\n@@ -8,6 +8,7 @@\n class MailPasswordView(BrowserView):\n \n def __call__(self):\n+ response = None\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n@@ -22,6 +23,6 @@\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n- self.context.absolute_url() + 'mail_password_form'\n+ self.context.absolute_url() + '/mail_password_form'\n )\n return response\n", "issue": "mail_password form crashes if user is not found\nVisiting /mali_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import Unauthorized\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\n\n\nclass MailPasswordView(BrowserView):\n\n def __call__(self):\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n self.request,\n )\n except ValueError as e:\n try:\n msg = _(str(e))\n except Unauthorized:\n # If we are not allowed to tell the user, what is wrong, he\n # should get an error message and contact the admins\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n self.context.absolute_url() + 'mail_password_form'\n )\n return response\n", "path": "Products/CMFPlone/browser/login/mail_password.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import Unauthorized\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\n\n\nclass MailPasswordView(BrowserView):\n\n def __call__(self):\n response = None\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n self.request,\n )\n except ValueError as e:\n try:\n msg = _(str(e))\n except Unauthorized:\n # If we are not allowed to tell the user, what is wrong, he\n # should get an error message and contact the admins\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n self.context.absolute_url() + '/mail_password_form'\n )\n return response\n", "path": "Products/CMFPlone/browser/login/mail_password.py"}]} | 545 | 178 |
gh_patches_debug_368 | rasdani/github-patches | git_diff | frappe__frappe-17020 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Share doesn't disappear
## Description of the issue
When the read Permission of a Share is removed by de-selecting the checkbox, then the corresponding DocShare is removed in the Backend, but the checkbox is automatically re-selected in the frontend. After a refresh, the share is gone.
## Context information (for bug reports)
**Output of `bench version`**
```
frappe 14.x.x-develop
```
## Steps to reproduce the issue
1. Open an arbitrary Document
2. Add a share with read permissions
3. Remove the read permission by clicking the checkbox
### Observed result
The checkbox is automatically re-selected
### Expected result
The share entry disappears
## Additional information
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/share.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe import _
6 from frappe.desk.doctype.notification_log.notification_log import (
7 enqueue_create_notification,
8 get_title,
9 get_title_html,
10 )
11 from frappe.desk.form.document_follow import follow_document
12 from frappe.utils import cint
13
14
15 @frappe.whitelist()
16 def add(
17 doctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0
18 ):
19 """Share the given document with a user."""
20 if not user:
21 user = frappe.session.user
22
23 if not (flags or {}).get("ignore_share_permission"):
24 check_share_permission(doctype, name)
25
26 share_name = get_share_name(doctype, name, user, everyone)
27
28 if share_name:
29 doc = frappe.get_doc("DocShare", share_name)
30 else:
31 doc = frappe.new_doc("DocShare")
32 doc.update(
33 {"user": user, "share_doctype": doctype, "share_name": name, "everyone": cint(everyone)}
34 )
35
36 if flags:
37 doc.flags.update(flags)
38
39 doc.update(
40 {
41 # always add read, since you are adding!
42 "read": 1,
43 "write": cint(write),
44 "submit": cint(submit),
45 "share": cint(share),
46 }
47 )
48
49 doc.save(ignore_permissions=True)
50 notify_assignment(user, doctype, name, everyone, notify=notify)
51
52 if frappe.get_cached_value("User", user, "follow_shared_documents"):
53 follow_document(doctype, name, user)
54
55 return doc
56
57
58 def remove(doctype, name, user, flags=None):
59 share_name = frappe.db.get_value(
60 "DocShare", {"user": user, "share_name": name, "share_doctype": doctype}
61 )
62
63 if share_name:
64 frappe.delete_doc("DocShare", share_name, flags=flags)
65
66
67 @frappe.whitelist()
68 def set_permission(doctype, name, user, permission_to, value=1, everyone=0):
69 """Set share permission."""
70 check_share_permission(doctype, name)
71
72 share_name = get_share_name(doctype, name, user, everyone)
73 value = int(value)
74
75 if not share_name:
76 if value:
77 share = add(doctype, name, user, everyone=everyone, **{permission_to: 1})
78 else:
79 # no share found, nothing to remove
80 share = {}
81 pass
82 else:
83 share = frappe.get_doc("DocShare", share_name)
84 share.flags.ignore_permissions = True
85 share.set(permission_to, value)
86
87 if not value:
88 # un-set higher-order permissions too
89 if permission_to == "read":
90 share.read = share.write = share.submit = share.share = 0
91
92 share.save()
93
94 if not (share.read or share.write or share.submit or share.share):
95 share.delete()
96 share = {}
97
98 return share
99
100
101 @frappe.whitelist()
102 def get_users(doctype, name):
103 """Get list of users with which this document is shared"""
104 return frappe.db.get_all(
105 "DocShare",
106 fields=[
107 "`name`",
108 "`user`",
109 "`read`",
110 "`write`",
111 "`submit`",
112 "`share`",
113 "everyone",
114 "owner",
115 "creation",
116 ],
117 filters=dict(share_doctype=doctype, share_name=name),
118 )
119
120
121 def get_shared(doctype, user=None, rights=None):
122 """Get list of shared document names for given user and DocType.
123
124 :param doctype: DocType of which shared names are queried.
125 :param user: User for which shared names are queried.
126 :param rights: List of rights for which the document is shared. List of `read`, `write`, `share`"""
127
128 if not user:
129 user = frappe.session.user
130
131 if not rights:
132 rights = ["read"]
133
134 filters = [[right, "=", 1] for right in rights]
135 filters += [["share_doctype", "=", doctype]]
136 or_filters = [["user", "=", user]]
137 if user != "Guest":
138 or_filters += [["everyone", "=", 1]]
139
140 shared_docs = frappe.db.get_all(
141 "DocShare", fields=["share_name"], filters=filters, or_filters=or_filters
142 )
143
144 return [doc.share_name for doc in shared_docs]
145
146
147 def get_shared_doctypes(user=None):
148 """Return list of doctypes in which documents are shared for the given user."""
149 if not user:
150 user = frappe.session.user
151 table = frappe.qb.DocType("DocShare")
152 query = (
153 frappe.qb.from_(table)
154 .where((table.user == user) | (table.everyone == 1))
155 .select(table.share_doctype)
156 .distinct()
157 )
158 return query.run(pluck=True)
159
160
161 def get_share_name(doctype, name, user, everyone):
162 if cint(everyone):
163 share_name = frappe.db.get_value(
164 "DocShare", {"everyone": 1, "share_name": name, "share_doctype": doctype}
165 )
166 else:
167 share_name = frappe.db.get_value(
168 "DocShare", {"user": user, "share_name": name, "share_doctype": doctype}
169 )
170
171 return share_name
172
173
174 def check_share_permission(doctype, name):
175 """Check if the user can share with other users"""
176 if not frappe.has_permission(doctype, ptype="share", doc=name):
177 frappe.throw(
178 _("No permission to {0} {1} {2}").format("share", doctype, name), frappe.PermissionError
179 )
180
181
182 def notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):
183
184 if not (shared_by and doctype and doc_name) or everyone or not notify:
185 return
186
187 from frappe.utils import get_fullname
188
189 title = get_title(doctype, doc_name)
190
191 reference_user = get_fullname(frappe.session.user)
192 notification_message = _("{0} shared a document {1} {2} with you").format(
193 frappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)
194 )
195
196 notification_doc = {
197 "type": "Share",
198 "document_type": doctype,
199 "subject": notification_message,
200 "document_name": doc_name,
201 "from_user": frappe.session.user,
202 }
203
204 enqueue_create_notification(shared_by, notification_doc)
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/share.py b/frappe/share.py
--- a/frappe/share.py
+++ b/frappe/share.py
@@ -93,7 +93,7 @@
if not (share.read or share.write or share.submit or share.share):
share.delete()
- share = {}
+ share = None
return share
| {"golden_diff": "diff --git a/frappe/share.py b/frappe/share.py\n--- a/frappe/share.py\n+++ b/frappe/share.py\n@@ -93,7 +93,7 @@\n \n \t\tif not (share.read or share.write or share.submit or share.share):\n \t\t\tshare.delete()\n-\t\t\tshare = {}\n+\t\t\tshare = None\n \n \treturn share\n", "issue": "Remove Share doesn't disappear \n## Description of the issue\r\nWhen the read Permission of a Share is removed by de-selecting the checkbox, then the corresponding DocShare is removed in the Backend, but the checkbox is automatically re-selected in the frontend. After a refresh, the share \r\n\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nfrappe 14.x.x-develop\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Open an arbitrary Document\r\n2. Add a share with read permissions\r\n3. Remove the read permission by clicking the checkbox\r\n\r\n### Observed result\r\nThe checkbox is automatically re-selected\r\n\r\n### Expected result\r\nThe share entry disappears\r\n\r\n## Additional information\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_log.notification_log import (\n\tenqueue_create_notification,\n\tget_title,\n\tget_title_html,\n)\nfrom frappe.desk.form.document_follow import follow_document\nfrom frappe.utils import cint\n\n\[email protected]()\ndef add(\n\tdoctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0\n):\n\t\"\"\"Share the given document with a user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not (flags or {}).get(\"ignore_share_permission\"):\n\t\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\n\tif share_name:\n\t\tdoc = frappe.get_doc(\"DocShare\", share_name)\n\telse:\n\t\tdoc = frappe.new_doc(\"DocShare\")\n\t\tdoc.update(\n\t\t\t{\"user\": user, \"share_doctype\": doctype, \"share_name\": name, \"everyone\": cint(everyone)}\n\t\t)\n\n\tif flags:\n\t\tdoc.flags.update(flags)\n\n\tdoc.update(\n\t\t{\n\t\t\t# always add read, since you are adding!\n\t\t\t\"read\": 1,\n\t\t\t\"write\": cint(write),\n\t\t\t\"submit\": cint(submit),\n\t\t\t\"share\": cint(share),\n\t\t}\n\t)\n\n\tdoc.save(ignore_permissions=True)\n\tnotify_assignment(user, doctype, name, everyone, notify=notify)\n\n\tif frappe.get_cached_value(\"User\", user, \"follow_shared_documents\"):\n\t\tfollow_document(doctype, name, user)\n\n\treturn doc\n\n\ndef remove(doctype, name, user, flags=None):\n\tshare_name = frappe.db.get_value(\n\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t)\n\n\tif share_name:\n\t\tfrappe.delete_doc(\"DocShare\", share_name, flags=flags)\n\n\[email protected]()\ndef set_permission(doctype, name, user, permission_to, value=1, everyone=0):\n\t\"\"\"Set share permission.\"\"\"\n\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\tvalue = int(value)\n\n\tif not share_name:\n\t\tif value:\n\t\t\tshare = add(doctype, name, user, everyone=everyone, **{permission_to: 1})\n\t\telse:\n\t\t\t# no share found, nothing to remove\n\t\t\tshare = {}\n\t\t\tpass\n\telse:\n\t\tshare = frappe.get_doc(\"DocShare\", share_name)\n\t\tshare.flags.ignore_permissions = True\n\t\tshare.set(permission_to, value)\n\n\t\tif not value:\n\t\t\t# un-set higher-order permissions too\n\t\t\tif 
permission_to == \"read\":\n\t\t\t\tshare.read = share.write = share.submit = share.share = 0\n\n\t\tshare.save()\n\n\t\tif not (share.read or share.write or share.submit or share.share):\n\t\t\tshare.delete()\n\t\t\tshare = {}\n\n\treturn share\n\n\[email protected]()\ndef get_users(doctype, name):\n\t\"\"\"Get list of users with which this document is shared\"\"\"\n\treturn frappe.db.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\n\t\t\t\"`name`\",\n\t\t\t\"`user`\",\n\t\t\t\"`read`\",\n\t\t\t\"`write`\",\n\t\t\t\"`submit`\",\n\t\t\t\"`share`\",\n\t\t\t\"everyone\",\n\t\t\t\"owner\",\n\t\t\t\"creation\",\n\t\t],\n\t\tfilters=dict(share_doctype=doctype, share_name=name),\n\t)\n\n\ndef get_shared(doctype, user=None, rights=None):\n\t\"\"\"Get list of shared document names for given user and DocType.\n\n\t:param doctype: DocType of which shared names are queried.\n\t:param user: User for which shared names are queried.\n\t:param rights: List of rights for which the document is shared. List of `read`, `write`, `share`\"\"\"\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tfilters = [[right, \"=\", 1] for right in rights]\n\tfilters += [[\"share_doctype\", \"=\", doctype]]\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += [[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.db.get_all(\n\t\t\"DocShare\", fields=[\"share_name\"], filters=filters, or_filters=or_filters\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]\n\n\ndef get_shared_doctypes(user=None):\n\t\"\"\"Return list of doctypes in which documents are shared for the given user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\ttable = frappe.qb.DocType(\"DocShare\")\n\tquery = (\n\t\tfrappe.qb.from_(table)\n\t\t.where((table.user == user) | (table.everyone == 1))\n\t\t.select(table.share_doctype)\n\t\t.distinct()\n\t)\n\treturn query.run(pluck=True)\n\n\ndef get_share_name(doctype, name, user, everyone):\n\tif cint(everyone):\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"everyone\": 1, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\telse:\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\n\treturn share_name\n\n\ndef check_share_permission(doctype, name):\n\t\"\"\"Check if the user can share with other users\"\"\"\n\tif not frappe.has_permission(doctype, ptype=\"share\", doc=name):\n\t\tfrappe.throw(\n\t\t\t_(\"No permission to {0} {1} {2}\").format(\"share\", doctype, name), frappe.PermissionError\n\t\t)\n\n\ndef notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):\n\n\tif not (shared_by and doctype and doc_name) or everyone or not notify:\n\t\treturn\n\n\tfrom frappe.utils import get_fullname\n\n\ttitle = get_title(doctype, doc_name)\n\n\treference_user = get_fullname(frappe.session.user)\n\tnotification_message = _(\"{0} shared a document {1} {2} with you\").format(\n\t\tfrappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)\n\t)\n\n\tnotification_doc = {\n\t\t\"type\": \"Share\",\n\t\t\"document_type\": doctype,\n\t\t\"subject\": notification_message,\n\t\t\"document_name\": doc_name,\n\t\t\"from_user\": frappe.session.user,\n\t}\n\n\tenqueue_create_notification(shared_by, notification_doc)\n", "path": "frappe/share.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_log.notification_log import (\n\tenqueue_create_notification,\n\tget_title,\n\tget_title_html,\n)\nfrom frappe.desk.form.document_follow import follow_document\nfrom frappe.utils import cint\n\n\[email protected]()\ndef add(\n\tdoctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0\n):\n\t\"\"\"Share the given document with a user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not (flags or {}).get(\"ignore_share_permission\"):\n\t\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\n\tif share_name:\n\t\tdoc = frappe.get_doc(\"DocShare\", share_name)\n\telse:\n\t\tdoc = frappe.new_doc(\"DocShare\")\n\t\tdoc.update(\n\t\t\t{\"user\": user, \"share_doctype\": doctype, \"share_name\": name, \"everyone\": cint(everyone)}\n\t\t)\n\n\tif flags:\n\t\tdoc.flags.update(flags)\n\n\tdoc.update(\n\t\t{\n\t\t\t# always add read, since you are adding!\n\t\t\t\"read\": 1,\n\t\t\t\"write\": cint(write),\n\t\t\t\"submit\": cint(submit),\n\t\t\t\"share\": cint(share),\n\t\t}\n\t)\n\n\tdoc.save(ignore_permissions=True)\n\tnotify_assignment(user, doctype, name, everyone, notify=notify)\n\n\tif frappe.get_cached_value(\"User\", user, \"follow_shared_documents\"):\n\t\tfollow_document(doctype, name, user)\n\n\treturn doc\n\n\ndef remove(doctype, name, user, flags=None):\n\tshare_name = frappe.db.get_value(\n\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t)\n\n\tif share_name:\n\t\tfrappe.delete_doc(\"DocShare\", share_name, flags=flags)\n\n\[email protected]()\ndef set_permission(doctype, name, user, permission_to, value=1, everyone=0):\n\t\"\"\"Set share permission.\"\"\"\n\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\tvalue = int(value)\n\n\tif not share_name:\n\t\tif value:\n\t\t\tshare = add(doctype, name, user, everyone=everyone, **{permission_to: 1})\n\t\telse:\n\t\t\t# no share found, nothing to remove\n\t\t\tshare = {}\n\t\t\tpass\n\telse:\n\t\tshare = frappe.get_doc(\"DocShare\", share_name)\n\t\tshare.flags.ignore_permissions = True\n\t\tshare.set(permission_to, value)\n\n\t\tif not value:\n\t\t\t# un-set higher-order permissions too\n\t\t\tif permission_to == \"read\":\n\t\t\t\tshare.read = share.write = share.submit = share.share = 0\n\n\t\tshare.save()\n\n\t\tif not (share.read or share.write or share.submit or share.share):\n\t\t\tshare.delete()\n\t\t\tshare = None\n\n\treturn share\n\n\[email protected]()\ndef get_users(doctype, name):\n\t\"\"\"Get list of users with which this document is shared\"\"\"\n\treturn frappe.db.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\n\t\t\t\"`name`\",\n\t\t\t\"`user`\",\n\t\t\t\"`read`\",\n\t\t\t\"`write`\",\n\t\t\t\"`submit`\",\n\t\t\t\"`share`\",\n\t\t\t\"everyone\",\n\t\t\t\"owner\",\n\t\t\t\"creation\",\n\t\t],\n\t\tfilters=dict(share_doctype=doctype, share_name=name),\n\t)\n\n\ndef get_shared(doctype, user=None, rights=None):\n\t\"\"\"Get list of shared document names for given user and DocType.\n\n\t:param doctype: DocType of which shared names are queried.\n\t:param user: User for which shared names are queried.\n\t:param rights: List of rights for which the document is shared. 
List of `read`, `write`, `share`\"\"\"\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tfilters = [[right, \"=\", 1] for right in rights]\n\tfilters += [[\"share_doctype\", \"=\", doctype]]\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += [[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.db.get_all(\n\t\t\"DocShare\", fields=[\"share_name\"], filters=filters, or_filters=or_filters\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]\n\n\ndef get_shared_doctypes(user=None):\n\t\"\"\"Return list of doctypes in which documents are shared for the given user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\ttable = frappe.qb.DocType(\"DocShare\")\n\tquery = (\n\t\tfrappe.qb.from_(table)\n\t\t.where((table.user == user) | (table.everyone == 1))\n\t\t.select(table.share_doctype)\n\t\t.distinct()\n\t)\n\treturn query.run(pluck=True)\n\n\ndef get_share_name(doctype, name, user, everyone):\n\tif cint(everyone):\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"everyone\": 1, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\telse:\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\n\treturn share_name\n\n\ndef check_share_permission(doctype, name):\n\t\"\"\"Check if the user can share with other users\"\"\"\n\tif not frappe.has_permission(doctype, ptype=\"share\", doc=name):\n\t\tfrappe.throw(\n\t\t\t_(\"No permission to {0} {1} {2}\").format(\"share\", doctype, name), frappe.PermissionError\n\t\t)\n\n\ndef notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):\n\n\tif not (shared_by and doctype and doc_name) or everyone or not notify:\n\t\treturn\n\n\tfrom frappe.utils import get_fullname\n\n\ttitle = get_title(doctype, doc_name)\n\n\treference_user = get_fullname(frappe.session.user)\n\tnotification_message = _(\"{0} shared a document {1} {2} with you\").format(\n\t\tfrappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)\n\t)\n\n\tnotification_doc = {\n\t\t\"type\": \"Share\",\n\t\t\"document_type\": doctype,\n\t\t\"subject\": notification_message,\n\t\t\"document_name\": doc_name,\n\t\t\"from_user\": frappe.session.user,\n\t}\n\n\tenqueue_create_notification(shared_by, notification_doc)\n", "path": "frappe/share.py"}]} | 2,470 | 79 |
gh_patches_debug_13309 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
note should print when verbosity is set to at least verbose
We have the `note` function that prints its argument only on the final example, but actually it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hypothesis-python/src/hypothesis/control.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import traceback
21
22 from hypothesis.errors import CleanupFailed, InvalidArgument, \
23 UnsatisfiedAssumption
24 from hypothesis.reporting import report
25 from hypothesis.utils.dynamicvariables import DynamicVariable
26
27 if False:
28 from typing import Any, AnyStr # noqa
29
30
31 def reject():
32 raise UnsatisfiedAssumption()
33
34
35 def assume(condition):
36 # type: (Any) -> bool
37 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks
38 the example as bad, rather than failing the test.
39
40 This allows you to specify properties that you *assume* will be
41 true, and let Hypothesis try to avoid similar examples in future.
42 """
43 if not condition:
44 raise UnsatisfiedAssumption()
45 return True
46
47
48 _current_build_context = DynamicVariable(None)
49
50
51 def current_build_context():
52 context = _current_build_context.value
53 if context is None:
54 raise InvalidArgument(
55 u'No build context registered')
56 return context
57
58
59 class BuildContext(object):
60
61 def __init__(self, data, is_final=False, close_on_capture=True):
62 self.data = data
63 self.tasks = []
64 self.is_final = is_final
65 self.close_on_capture = close_on_capture
66 self.close_on_del = False
67 self.notes = []
68
69 def __enter__(self):
70 self.assign_variable = _current_build_context.with_value(self)
71 self.assign_variable.__enter__()
72 return self
73
74 def __exit__(self, exc_type, exc_value, tb):
75 self.assign_variable.__exit__(exc_type, exc_value, tb)
76 if self.close() and exc_type is None:
77 raise CleanupFailed()
78
79 def local(self):
80 return _current_build_context.with_value(self)
81
82 def close(self):
83 any_failed = False
84 for task in self.tasks:
85 try:
86 task()
87 except BaseException:
88 any_failed = True
89 report(traceback.format_exc())
90 return any_failed
91
92
93 def cleanup(teardown):
94 """Register a function to be called when the current test has finished
95 executing. Any exceptions thrown in teardown will be printed but not
96 rethrown.
97
98 Inside a test this isn't very interesting, because you can just use
99 a finally block, but note that you can use this inside map, flatmap,
100 etc. in order to e.g. insist that a value is closed at the end.
101 """
102 context = _current_build_context.value
103 if context is None:
104 raise InvalidArgument(
105 u'Cannot register cleanup outside of build context')
106 context.tasks.append(teardown)
107
108
109 def note(value):
110 # type: (AnyStr) -> None
111 """Report this value in the final execution."""
112 context = _current_build_context.value
113 if context is None:
114 raise InvalidArgument(
115 'Cannot make notes outside of a test')
116 context.notes.append(value)
117 if context.is_final:
118 report(value)
119
120
121 def event(value):
122 # type: (AnyStr) -> None
123 """Record an event that occurred this test. Statistics on number of test
124 runs with each event will be reported at the end if you run Hypothesis in
125 statistics reporting mode.
126
127 Events should be strings or convertible to them.
128 """
129 context = _current_build_context.value
130 if context is None:
131 raise InvalidArgument(
132 'Cannot make record events outside of a test')
133
134 if context.data is not None:
135 context.data.note_event(value)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py
--- a/hypothesis-python/src/hypothesis/control.py
+++ b/hypothesis-python/src/hypothesis/control.py
@@ -19,6 +19,7 @@
import traceback
+from hypothesis import Verbosity, settings
from hypothesis.errors import CleanupFailed, InvalidArgument, \
UnsatisfiedAssumption
from hypothesis.reporting import report
@@ -114,7 +115,7 @@
raise InvalidArgument(
'Cannot make notes outside of a test')
context.notes.append(value)
- if context.is_final:
+ if context.is_final or settings.default.verbosity >= Verbosity.verbose:
report(value)
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -19,6 +19,7 @@\n \n import traceback\n \n+from hypothesis import Verbosity, settings\n from hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\n from hypothesis.reporting import report\n@@ -114,7 +115,7 @@\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n- if context.is_final:\n+ if context.is_final or settings.default.verbosity >= Verbosity.verbose:\n report(value)\n", "issue": "note should print when verbosity is set to at least verbose \nWe have the `note` function that prints its argument only on the final example, but actually it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run. \n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport traceback\n\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\nfrom hypothesis.reporting import report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\nif False:\n from typing import Any, AnyStr # noqa\n\n\ndef reject():\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition):\n # type: (Any) -> bool\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'No build context registered')\n return context\n\n\nclass BuildContext(object):\n\n def __init__(self, data, is_final=False, close_on_capture=True):\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def local(self):\n return _current_build_context.with_value(self)\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function 
to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'Cannot register cleanup outside of build context')\n context.tasks.append(teardown)\n\n\ndef note(value):\n # type: (AnyStr) -> None\n \"\"\"Report this value in the final execution.\"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n if context.is_final:\n report(value)\n\n\ndef event(value):\n # type: (AnyStr) -> None\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make record events outside of a test')\n\n if context.data is not None:\n context.data.note_event(value)\n", "path": "hypothesis-python/src/hypothesis/control.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport traceback\n\nfrom hypothesis import Verbosity, settings\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\nfrom hypothesis.reporting import report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\nif False:\n from typing import Any, AnyStr # noqa\n\n\ndef reject():\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition):\n # type: (Any) -> bool\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'No build context registered')\n return context\n\n\nclass BuildContext(object):\n\n def __init__(self, data, is_final=False, close_on_capture=True):\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def local(self):\n return _current_build_context.with_value(self)\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'Cannot register cleanup outside of build context')\n context.tasks.append(teardown)\n\n\ndef note(value):\n # type: (AnyStr) -> None\n \"\"\"Report this value in the final execution.\"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n if context.is_final or settings.default.verbosity >= Verbosity.verbose:\n report(value)\n\n\ndef event(value):\n # type: (AnyStr) -> None\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make record events outside of a test')\n\n if context.data is not None:\n context.data.note_event(value)\n", "path": "hypothesis-python/src/hypothesis/control.py"}]} | 1,569 | 162 |
gh_patches_debug_342 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5236 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
qa_loader.py uses a fixed random seed every run
## Description
Always using the same seed makes it impossible to run `qa_loader.py` multiple times with the same database, as supposedly random values aren't, causing unique constraint violations.
## Steps to Reproduce
- Run the dev server with `make dev`
- Start a shell in the container with `docker exec -it securedrop-dev-0 bash`
- In that shell, run `./qa_loader --journalist-count 1 --source-count 1`
- Run the same command a second time.
## Expected Behavior
That you could keep adding random journalists and sources to the database.
## Actual Behavior
You get `sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: journalists.username` because [`random.seed` is always called with the same value](https://github.com/freedomofpress/securedrop/blob/ec2220c3c2b9120d029b616d3a07647b175bc6ab/securedrop/qa_loader.py#L22).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/qa_loader.py`
Content:
```
1 #!/opt/venvs/securedrop-app-code/bin/python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import random
6 import string
7 import sys
8 from argparse import ArgumentParser
9 from datetime import datetime
10 from itertools import cycle
11 from os import path
12
13 from flask import current_app
14
15 from crypto_util import DICEWARE_SAFE_CHARS
16 from db import db
17 from journalist_app import create_app
18 from models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission
19 from sdconfig import config as sdconfig
20
21
22 random.seed("~(=^–^)") # mrow?
23
24
25 def random_bool():
26 return bool(random.getrandbits(1))
27
28
29 def random_chars(len, nullable, chars=string.ascii_letters):
30 if nullable and random_bool():
31 return None
32 else:
33 return "".join([random.choice(chars) for _ in range(len)])
34
35
36 def bool_or_none():
37 return random.choice([True, False, None])
38
39
40 def random_datetime(nullable):
41 if nullable and random_bool():
42 return None
43 else:
44 now = datetime.now()
45 return datetime(
46 year=random.randint(2013, now.year),
47 month=random.randint(1, now.month),
48 day=random.randint(1, now.day),
49 hour=random.randint(0, 23),
50 minute=random.randint(0, 59),
51 second=random.randint(0, 59),
52 microsecond=random.randint(0, 1000),
53 )
54
55
56 def positive_int(s):
57 i = int(s)
58 if i < 1:
59 raise ValueError("{} is not >= 1".format(s))
60 return i
61
62
63 def fraction(s):
64 f = float(s)
65 if 0 <= f <= 1:
66 return f
67 raise ValueError("{} should be a float between 0 and 1".format(s))
68
69
70 submissions = cycle(
71 [
72 "This is a test submission without markup!",
73 'This is a test submission with markup and characters such as \, \\, \', " and ". '
74 + "<strong>This text should not be bold</strong>!", # noqa: W605, E501
75 ]
76 )
77
78
79 replies = cycle(
80 [
81 "This is a test reply without markup!",
82 'This is a test reply with markup and characters such as \, \\, \', " and ". '
83 + "<strong>This text should not be bold</strong>!", # noqa: W605, E501
84 ]
85 )
86
87
88 class QaLoader(object):
89 def __init__(
90 self,
91 config,
92 journalist_count=10,
93 source_count=50,
94 submissions_per_source=1,
95 replies_per_source=1,
96 source_star_fraction=0.1,
97 source_reply_fraction=0.5,
98 ):
99 """
100 source_star_fraction and source_reply_fraction are simply the
101 fraction of sources starred or replied to.
102 """
103 self.config = config
104 self.app = create_app(config)
105
106 self.journalist_count = journalist_count
107 self.source_count = source_count
108 self.submissions_per_source = submissions_per_source
109 self.replies_per_source = replies_per_source
110 self.source_star_fraction = source_star_fraction
111 self.source_reply_fraction = source_reply_fraction
112
113 self.journalists = []
114 self.sources = []
115
116 def new_journalist(self):
117 # Make a diceware-like password
118 pw = " ".join(
119 [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]
120 )
121 journalist = Journalist(
122 username=random_chars(random.randint(3, 32), nullable=False),
123 password=pw,
124 is_admin=random_bool(),
125 )
126 if random_bool():
127 # to add legacy passwords back in
128 journalist.passphrase_hash = None
129 journalist.pw_salt = random_chars(32, nullable=False).encode("utf-8")
130 journalist.pw_hash = random_chars(64, nullable=False).encode("utf-8")
131
132 journalist.is_admin = bool_or_none()
133
134 journalist.is_totp = bool_or_none()
135 journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None
136 journalist.created_on = random_datetime(nullable=True)
137 journalist.last_access = random_datetime(nullable=True)
138
139 db.session.add(journalist)
140 db.session.flush()
141 self.journalists.append(journalist.id)
142
143 def new_source(self):
144 codename = current_app.crypto_util.genrandomid()
145 filesystem_id = current_app.crypto_util.hash_codename(codename)
146 journalist_designation = current_app.crypto_util.display_id()
147 source = Source(filesystem_id, journalist_designation)
148 db.session.add(source)
149 db.session.flush()
150
151 # Generate submissions directory and generate source key
152 os.mkdir(current_app.storage.path(source.filesystem_id))
153 current_app.crypto_util.genkeypair(source.filesystem_id, codename)
154
155 self.sources.append(source.id)
156
157 def new_submission(self, source_id):
158 source = Source.query.get(source_id)
159
160 source.interaction_count += 1
161 fpath = current_app.storage.save_message_submission(
162 source.filesystem_id,
163 source.interaction_count,
164 source.journalist_filename,
165 next(submissions),
166 )
167 submission = Submission(source, fpath)
168 db.session.add(submission)
169
170 source.pending = False
171 source.last_updated = datetime.utcnow()
172
173 db.session.flush()
174
175 def new_source_star(self, source_id):
176 source = Source.query.get(source_id)
177 star = SourceStar(source, bool_or_none())
178 db.session.add(star)
179
180 def new_reply(self, journalist_id, source_id):
181 source = Source.query.get(source_id)
182
183 journalist = Journalist.query.get(journalist_id)
184
185 source.interaction_count += 1
186 source.last_updated = datetime.utcnow()
187
188 fname = "{}-{}-reply.gpg".format(source.interaction_count, source.journalist_filename)
189 current_app.crypto_util.encrypt(
190 next(replies),
191 [
192 current_app.crypto_util.get_fingerprint(source.filesystem_id),
193 sdconfig.JOURNALIST_KEY
194 ],
195 current_app.storage.path(source.filesystem_id, fname),
196 )
197
198 reply = Reply(journalist, source, fname)
199 db.session.add(reply)
200 db.session.flush()
201
202 def new_journalist_login_attempt(self, journalist_id):
203 journalist = Journalist.query.get(journalist_id)
204 attempt = JournalistLoginAttempt(journalist)
205 attempt.timestamp = random_datetime(nullable=True)
206 db.session.add(attempt)
207
208 def load(self):
209 with self.app.app_context():
210 print("Creating {:d} journalists...".format(self.journalist_count))
211 for i in range(1, self.journalist_count + 1):
212 self.new_journalist()
213 if i % min(10, max(1, int(self.journalist_count / 10))) == 0:
214 sys.stdout.write("{}\r{}".format(" " * len(str(self.journalist_count + 1)), i))
215 print("\n")
216 db.session.commit()
217
218 print("Creating {:d} sources...".format(self.source_count))
219 for i in range(1, self.source_count + 1):
220 self.new_source()
221 if i % min(10, max(1, int(self.source_count / 10))) == 0:
222 sys.stdout.write("{}\r{}".format(" " * len(str(self.source_count + 1)), i))
223 print("\n")
224 db.session.commit()
225
226 print(
227 "Creating submissions ({:d} each) for each source...".format(
228 self.submissions_per_source
229 )
230 )
231 for sid in self.sources:
232 for _ in range(1, self.submissions_per_source + 1):
233 self.new_submission(sid)
234 db.session.commit()
235
236 print("Starring {:.2f}% of all sources...".format(self.source_star_fraction * 100))
237 for sid in random.sample(
238 self.sources, int(self.source_count * self.source_star_fraction)
239 ):
240 self.new_source_star(sid)
241 db.session.commit()
242
243 print(
244 "Creating replies ({:d} each) for {:.2f}% of sources...".format(
245 self.replies_per_source, self.source_reply_fraction * 100
246 )
247 )
248 for sid in random.sample(
249 self.sources, int(self.source_count * self.source_reply_fraction)
250 ):
251 jid = random.choice(self.journalists)
252 for _ in range(self.replies_per_source):
253 self.new_reply(jid, sid)
254 db.session.commit()
255
256 for jid in self.journalists:
257 self.new_journalist_login_attempt(jid)
258 db.session.commit()
259
260
261 def arg_parser():
262 parser = ArgumentParser(
263 path.basename(__file__), description="Loads data into the database for testing upgrades"
264 )
265 parser.add_argument(
266 "--journalist-count",
267 type=positive_int,
268 default=10,
269 help=("Number of journalists to create"),
270 )
271 parser.add_argument(
272 "--source-count", type=positive_int, default=50, help=("Number of sources to create")
273 )
274 parser.add_argument(
275 "--submissions-per-source",
276 type=positive_int,
277 default=1,
278 help=("Number of submissions to create for each source"),
279 )
280 parser.add_argument(
281 "--replies-per-source",
282 type=positive_int,
283 default=1,
284 help=("Number of replies to create for each source"),
285 )
286 parser.add_argument(
287 "--source-star-fraction",
288 type=fraction,
289 default=0.1,
290 help=("Fraction of sources to star"),
291 )
292 parser.add_argument(
293 "--source-reply-fraction",
294 type=fraction,
295 default=0.5,
296 help=("Fraction of sources to reply to"),
297 )
298 return parser
299
300
301 def main():
302 args = arg_parser().parse_args()
303 print("Loading data. This may take a while.")
304 QaLoader(
305 sdconfig,
306 args.journalist_count,
307 args.source_count,
308 args.submissions_per_source,
309 args.replies_per_source,
310 args.source_star_fraction,
311 args.source_reply_fraction,
312 ).load()
313
314
315 if __name__ == "__main__":
316 try:
317 main()
318 except KeyboardInterrupt:
319 print("") # for prompt on a newline
320 sys.exit(1)
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/qa_loader.py b/securedrop/qa_loader.py
--- a/securedrop/qa_loader.py
+++ b/securedrop/qa_loader.py
@@ -19,9 +19,6 @@
from sdconfig import config as sdconfig
-random.seed("~(=^–^)") # mrow?
-
-
def random_bool():
return bool(random.getrandbits(1))
| {"golden_diff": "diff --git a/securedrop/qa_loader.py b/securedrop/qa_loader.py\n--- a/securedrop/qa_loader.py\n+++ b/securedrop/qa_loader.py\n@@ -19,9 +19,6 @@\n from sdconfig import config as sdconfig\n \n \n-random.seed(\"~(=^\u2013^)\") # mrow?\n-\n-\n def random_bool():\n return bool(random.getrandbits(1))\n", "issue": "qa_loader.py uses a fixed random seed every run\n## Description\r\n\r\nAlways using the same seed makes it impossible to run `qa_loader.py` multiple times with the same database, as supposedly random values aren't, causing unique constraint violations.\r\n\r\n## Steps to Reproduce\r\n\r\n- Run the dev server with `make dev`\r\n- Start a shell in the container with `docker exec -it securedrop-dev-0 bash`\r\n- In that shell, run `./qa_loader --journalist-count 1 --source-count 1`\r\n- Run the same command a second time.\r\n\r\n## Expected Behavior\r\n\r\nThat you could keep adding random journalists and sources to the database.\r\n\r\n## Actual Behavior\r\n\r\nYou get `sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: journalists.username` because [`random.seed` is always called with the same value](https://github.com/freedomofpress/securedrop/blob/ec2220c3c2b9120d029b616d3a07647b175bc6ab/securedrop/qa_loader.py#L22).\n", "before_files": [{"content": "#!/opt/venvs/securedrop-app-code/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport string\nimport sys\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom itertools import cycle\nfrom os import path\n\nfrom flask import current_app\n\nfrom crypto_util import DICEWARE_SAFE_CHARS\nfrom db import db\nfrom journalist_app import create_app\nfrom models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission\nfrom sdconfig import config as sdconfig\n\n\nrandom.seed(\"~(=^\u2013^)\") # mrow?\n\n\ndef random_bool():\n return bool(random.getrandbits(1))\n\n\ndef random_chars(len, nullable, chars=string.ascii_letters):\n if nullable and random_bool():\n return None\n else:\n return \"\".join([random.choice(chars) for _ in range(len)])\n\n\ndef bool_or_none():\n return random.choice([True, False, None])\n\n\ndef random_datetime(nullable):\n if nullable and random_bool():\n return None\n else:\n now = datetime.now()\n return datetime(\n year=random.randint(2013, now.year),\n month=random.randint(1, now.month),\n day=random.randint(1, now.day),\n hour=random.randint(0, 23),\n minute=random.randint(0, 59),\n second=random.randint(0, 59),\n microsecond=random.randint(0, 1000),\n )\n\n\ndef positive_int(s):\n i = int(s)\n if i < 1:\n raise ValueError(\"{} is not >= 1\".format(s))\n return i\n\n\ndef fraction(s):\n f = float(s)\n if 0 <= f <= 1:\n return f\n raise ValueError(\"{} should be a float between 0 and 1\".format(s))\n\n\nsubmissions = cycle(\n [\n \"This is a test submission without markup!\",\n 'This is a test submission with markup and characters such as \\, \\\\, \\', \" and \". '\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nreplies = cycle(\n [\n \"This is a test reply without markup!\",\n 'This is a test reply with markup and characters such as \\, \\\\, \\', \" and \". 
'\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nclass QaLoader(object):\n def __init__(\n self,\n config,\n journalist_count=10,\n source_count=50,\n submissions_per_source=1,\n replies_per_source=1,\n source_star_fraction=0.1,\n source_reply_fraction=0.5,\n ):\n \"\"\"\n source_star_fraction and source_reply_fraction are simply the\n fraction of sources starred or replied to.\n \"\"\"\n self.config = config\n self.app = create_app(config)\n\n self.journalist_count = journalist_count\n self.source_count = source_count\n self.submissions_per_source = submissions_per_source\n self.replies_per_source = replies_per_source\n self.source_star_fraction = source_star_fraction\n self.source_reply_fraction = source_reply_fraction\n\n self.journalists = []\n self.sources = []\n\n def new_journalist(self):\n # Make a diceware-like password\n pw = \" \".join(\n [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]\n )\n journalist = Journalist(\n username=random_chars(random.randint(3, 32), nullable=False),\n password=pw,\n is_admin=random_bool(),\n )\n if random_bool():\n # to add legacy passwords back in\n journalist.passphrase_hash = None\n journalist.pw_salt = random_chars(32, nullable=False).encode(\"utf-8\")\n journalist.pw_hash = random_chars(64, nullable=False).encode(\"utf-8\")\n\n journalist.is_admin = bool_or_none()\n\n journalist.is_totp = bool_or_none()\n journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None\n journalist.created_on = random_datetime(nullable=True)\n journalist.last_access = random_datetime(nullable=True)\n\n db.session.add(journalist)\n db.session.flush()\n self.journalists.append(journalist.id)\n\n def new_source(self):\n codename = current_app.crypto_util.genrandomid()\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n journalist_designation = current_app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n db.session.add(source)\n db.session.flush()\n\n # Generate submissions directory and generate source key\n os.mkdir(current_app.storage.path(source.filesystem_id))\n current_app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n self.sources.append(source.id)\n\n def new_submission(self, source_id):\n source = Source.query.get(source_id)\n\n source.interaction_count += 1\n fpath = current_app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n next(submissions),\n )\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n source.pending = False\n source.last_updated = datetime.utcnow()\n\n db.session.flush()\n\n def new_source_star(self, source_id):\n source = Source.query.get(source_id)\n star = SourceStar(source, bool_or_none())\n db.session.add(star)\n\n def new_reply(self, journalist_id, source_id):\n source = Source.query.get(source_id)\n\n journalist = Journalist.query.get(journalist_id)\n\n source.interaction_count += 1\n source.last_updated = datetime.utcnow()\n\n fname = \"{}-{}-reply.gpg\".format(source.interaction_count, source.journalist_filename)\n current_app.crypto_util.encrypt(\n next(replies),\n [\n current_app.crypto_util.get_fingerprint(source.filesystem_id),\n sdconfig.JOURNALIST_KEY\n ],\n current_app.storage.path(source.filesystem_id, fname),\n )\n\n reply = Reply(journalist, source, fname)\n db.session.add(reply)\n db.session.flush()\n\n def new_journalist_login_attempt(self, journalist_id):\n journalist = 
Journalist.query.get(journalist_id)\n attempt = JournalistLoginAttempt(journalist)\n attempt.timestamp = random_datetime(nullable=True)\n db.session.add(attempt)\n\n def load(self):\n with self.app.app_context():\n print(\"Creating {:d} journalists...\".format(self.journalist_count))\n for i in range(1, self.journalist_count + 1):\n self.new_journalist()\n if i % min(10, max(1, int(self.journalist_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.journalist_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\"Creating {:d} sources...\".format(self.source_count))\n for i in range(1, self.source_count + 1):\n self.new_source()\n if i % min(10, max(1, int(self.source_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.source_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\n \"Creating submissions ({:d} each) for each source...\".format(\n self.submissions_per_source\n )\n )\n for sid in self.sources:\n for _ in range(1, self.submissions_per_source + 1):\n self.new_submission(sid)\n db.session.commit()\n\n print(\"Starring {:.2f}% of all sources...\".format(self.source_star_fraction * 100))\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_star_fraction)\n ):\n self.new_source_star(sid)\n db.session.commit()\n\n print(\n \"Creating replies ({:d} each) for {:.2f}% of sources...\".format(\n self.replies_per_source, self.source_reply_fraction * 100\n )\n )\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_reply_fraction)\n ):\n jid = random.choice(self.journalists)\n for _ in range(self.replies_per_source):\n self.new_reply(jid, sid)\n db.session.commit()\n\n for jid in self.journalists:\n self.new_journalist_login_attempt(jid)\n db.session.commit()\n\n\ndef arg_parser():\n parser = ArgumentParser(\n path.basename(__file__), description=\"Loads data into the database for testing upgrades\"\n )\n parser.add_argument(\n \"--journalist-count\",\n type=positive_int,\n default=10,\n help=(\"Number of journalists to create\"),\n )\n parser.add_argument(\n \"--source-count\", type=positive_int, default=50, help=(\"Number of sources to create\")\n )\n parser.add_argument(\n \"--submissions-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of submissions to create for each source\"),\n )\n parser.add_argument(\n \"--replies-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of replies to create for each source\"),\n )\n parser.add_argument(\n \"--source-star-fraction\",\n type=fraction,\n default=0.1,\n help=(\"Fraction of sources to star\"),\n )\n parser.add_argument(\n \"--source-reply-fraction\",\n type=fraction,\n default=0.5,\n help=(\"Fraction of sources to reply to\"),\n )\n return parser\n\n\ndef main():\n args = arg_parser().parse_args()\n print(\"Loading data. 
This may take a while.\")\n QaLoader(\n sdconfig,\n args.journalist_count,\n args.source_count,\n args.submissions_per_source,\n args.replies_per_source,\n args.source_star_fraction,\n args.source_reply_fraction,\n ).load()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\") # for prompt on a newline\n sys.exit(1)\n", "path": "securedrop/qa_loader.py"}], "after_files": [{"content": "#!/opt/venvs/securedrop-app-code/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport string\nimport sys\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom itertools import cycle\nfrom os import path\n\nfrom flask import current_app\n\nfrom crypto_util import DICEWARE_SAFE_CHARS\nfrom db import db\nfrom journalist_app import create_app\nfrom models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission\nfrom sdconfig import config as sdconfig\n\n\ndef random_bool():\n return bool(random.getrandbits(1))\n\n\ndef random_chars(len, nullable, chars=string.ascii_letters):\n if nullable and random_bool():\n return None\n else:\n return \"\".join([random.choice(chars) for _ in range(len)])\n\n\ndef bool_or_none():\n return random.choice([True, False, None])\n\n\ndef random_datetime(nullable):\n if nullable and random_bool():\n return None\n else:\n now = datetime.now()\n return datetime(\n year=random.randint(2013, now.year),\n month=random.randint(1, now.month),\n day=random.randint(1, now.day),\n hour=random.randint(0, 23),\n minute=random.randint(0, 59),\n second=random.randint(0, 59),\n microsecond=random.randint(0, 1000),\n )\n\n\ndef positive_int(s):\n i = int(s)\n if i < 1:\n raise ValueError(\"{} is not >= 1\".format(s))\n return i\n\n\ndef fraction(s):\n f = float(s)\n if 0 <= f <= 1:\n return f\n raise ValueError(\"{} should be a float between 0 and 1\".format(s))\n\n\nsubmissions = cycle(\n [\n \"This is a test submission without markup!\",\n 'This is a test submission with markup and characters such as \\, \\\\, \\', \" and \". '\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nreplies = cycle(\n [\n \"This is a test reply without markup!\",\n 'This is a test reply with markup and characters such as \\, \\\\, \\', \" and \". 
'\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nclass QaLoader(object):\n def __init__(\n self,\n config,\n journalist_count=10,\n source_count=50,\n submissions_per_source=1,\n replies_per_source=1,\n source_star_fraction=0.1,\n source_reply_fraction=0.5,\n ):\n \"\"\"\n source_star_fraction and source_reply_fraction are simply the\n fraction of sources starred or replied to.\n \"\"\"\n self.config = config\n self.app = create_app(config)\n\n self.journalist_count = journalist_count\n self.source_count = source_count\n self.submissions_per_source = submissions_per_source\n self.replies_per_source = replies_per_source\n self.source_star_fraction = source_star_fraction\n self.source_reply_fraction = source_reply_fraction\n\n self.journalists = []\n self.sources = []\n\n def new_journalist(self):\n # Make a diceware-like password\n pw = \" \".join(\n [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]\n )\n journalist = Journalist(\n username=random_chars(random.randint(3, 32), nullable=False),\n password=pw,\n is_admin=random_bool(),\n )\n if random_bool():\n # to add legacy passwords back in\n journalist.passphrase_hash = None\n journalist.pw_salt = random_chars(32, nullable=False).encode(\"utf-8\")\n journalist.pw_hash = random_chars(64, nullable=False).encode(\"utf-8\")\n\n journalist.is_admin = bool_or_none()\n\n journalist.is_totp = bool_or_none()\n journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None\n journalist.created_on = random_datetime(nullable=True)\n journalist.last_access = random_datetime(nullable=True)\n\n db.session.add(journalist)\n db.session.flush()\n self.journalists.append(journalist.id)\n\n def new_source(self):\n codename = current_app.crypto_util.genrandomid()\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n journalist_designation = current_app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n db.session.add(source)\n db.session.flush()\n\n # Generate submissions directory and generate source key\n os.mkdir(current_app.storage.path(source.filesystem_id))\n current_app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n self.sources.append(source.id)\n\n def new_submission(self, source_id):\n source = Source.query.get(source_id)\n\n source.interaction_count += 1\n fpath = current_app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n next(submissions),\n )\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n source.pending = False\n source.last_updated = datetime.utcnow()\n\n db.session.flush()\n\n def new_source_star(self, source_id):\n source = Source.query.get(source_id)\n star = SourceStar(source, bool_or_none())\n db.session.add(star)\n\n def new_reply(self, journalist_id, source_id):\n source = Source.query.get(source_id)\n\n journalist = Journalist.query.get(journalist_id)\n\n source.interaction_count += 1\n source.last_updated = datetime.utcnow()\n\n fname = \"{}-{}-reply.gpg\".format(source.interaction_count, source.journalist_filename)\n current_app.crypto_util.encrypt(\n next(replies),\n [\n current_app.crypto_util.get_fingerprint(source.filesystem_id),\n sdconfig.JOURNALIST_KEY\n ],\n current_app.storage.path(source.filesystem_id, fname),\n )\n\n reply = Reply(journalist, source, fname)\n db.session.add(reply)\n db.session.flush()\n\n def new_journalist_login_attempt(self, journalist_id):\n journalist = 
Journalist.query.get(journalist_id)\n attempt = JournalistLoginAttempt(journalist)\n attempt.timestamp = random_datetime(nullable=True)\n db.session.add(attempt)\n\n def load(self):\n with self.app.app_context():\n print(\"Creating {:d} journalists...\".format(self.journalist_count))\n for i in range(1, self.journalist_count + 1):\n self.new_journalist()\n if i % min(10, max(1, int(self.journalist_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.journalist_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\"Creating {:d} sources...\".format(self.source_count))\n for i in range(1, self.source_count + 1):\n self.new_source()\n if i % min(10, max(1, int(self.source_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.source_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\n \"Creating submissions ({:d} each) for each source...\".format(\n self.submissions_per_source\n )\n )\n for sid in self.sources:\n for _ in range(1, self.submissions_per_source + 1):\n self.new_submission(sid)\n db.session.commit()\n\n print(\"Starring {:.2f}% of all sources...\".format(self.source_star_fraction * 100))\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_star_fraction)\n ):\n self.new_source_star(sid)\n db.session.commit()\n\n print(\n \"Creating replies ({:d} each) for {:.2f}% of sources...\".format(\n self.replies_per_source, self.source_reply_fraction * 100\n )\n )\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_reply_fraction)\n ):\n jid = random.choice(self.journalists)\n for _ in range(self.replies_per_source):\n self.new_reply(jid, sid)\n db.session.commit()\n\n for jid in self.journalists:\n self.new_journalist_login_attempt(jid)\n db.session.commit()\n\n\ndef arg_parser():\n parser = ArgumentParser(\n path.basename(__file__), description=\"Loads data into the database for testing upgrades\"\n )\n parser.add_argument(\n \"--journalist-count\",\n type=positive_int,\n default=10,\n help=(\"Number of journalists to create\"),\n )\n parser.add_argument(\n \"--source-count\", type=positive_int, default=50, help=(\"Number of sources to create\")\n )\n parser.add_argument(\n \"--submissions-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of submissions to create for each source\"),\n )\n parser.add_argument(\n \"--replies-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of replies to create for each source\"),\n )\n parser.add_argument(\n \"--source-star-fraction\",\n type=fraction,\n default=0.1,\n help=(\"Fraction of sources to star\"),\n )\n parser.add_argument(\n \"--source-reply-fraction\",\n type=fraction,\n default=0.5,\n help=(\"Fraction of sources to reply to\"),\n )\n return parser\n\n\ndef main():\n args = arg_parser().parse_args()\n print(\"Loading data. This may take a while.\")\n QaLoader(\n sdconfig,\n args.journalist_count,\n args.source_count,\n args.submissions_per_source,\n args.replies_per_source,\n args.source_star_fraction,\n args.source_reply_fraction,\n ).load()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\") # for prompt on a newline\n sys.exit(1)\n", "path": "securedrop/qa_loader.py"}]} | 3,608 | 92 |
gh_patches_debug_40821 | rasdani/github-patches | git_diff | falconry__falcon-2026 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement FloatConverter
Implement `FloatConverter` along the lines of [`IntConverter`](https://falcon.readthedocs.io/en/stable/api/routing.html#falcon.routing.IntConverter). Draw inspiration from `IntConverter`, or even find an efficient way to share code between the two!
Add the new converter to the list of [Built-in Converters](https://falcon.readthedocs.io/en/stable/api/routing.html#built-in-converters) under the `float` identifier.
Open questions: should we support converting `nan`, `inf` & `-inf` from path?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/routing/converters.py`
Content:
```
1 # Copyright 2017 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import abc
16 from datetime import datetime
17 import uuid
18
19 __all__ = (
20 'BaseConverter',
21 'IntConverter',
22 'DateTimeConverter',
23 'UUIDConverter',
24 )
25
26
27 # PERF(kgriffs): Avoid an extra namespace lookup when using this function
28 strptime = datetime.strptime
29
30
31 class BaseConverter(metaclass=abc.ABCMeta):
32 """Abstract base class for URI template field converters."""
33
34 CONSUME_MULTIPLE_SEGMENTS = False
35 """When set to ``True`` it indicates that this converter will consume
36 multiple URL path segments. Currently a converter with
37 ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template
38 effectively meaning that it will consume all of the remaining URL path
39 segments.
40 """
41
42 @abc.abstractmethod # pragma: no cover
43 def convert(self, value):
44 """Convert a URI template field value to another format or type.
45
46 Args:
47 value (str or List[str]): Original string to convert.
48 If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a
49 list of strings containing the path segments matched by
50 the converter.
51
52 Returns:
53 object: Converted field value, or ``None`` if the field
54 can not be converted.
55 """
56
57
58 def _consumes_multiple_segments(converter):
59 return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)
60
61
62 class IntConverter(BaseConverter):
63 """Converts a field value to an int.
64
65 Identifier: `int`
66
67 Keyword Args:
68 num_digits (int): Require the value to have the given
69 number of digits.
70 min (int): Reject the value if it is less than this number.
71 max (int): Reject the value if it is greater than this number.
72 """
73
74 __slots__ = ('_num_digits', '_min', '_max')
75
76 def __init__(self, num_digits=None, min=None, max=None):
77 if num_digits is not None and num_digits < 1:
78 raise ValueError('num_digits must be at least 1')
79
80 self._num_digits = num_digits
81 self._min = min
82 self._max = max
83
84 def convert(self, value):
85 if self._num_digits is not None and len(value) != self._num_digits:
86 return None
87
88 # NOTE(kgriffs): int() will accept numbers with preceding or
89 # trailing whitespace, so we need to do our own check. Using
90 # strip() is faster than either a regex or a series of or'd
91 # membership checks via "in", esp. as the length of contiguous
92 # numbers in the value grows.
93 if value.strip() != value:
94 return None
95
96 try:
97 value = int(value)
98 except ValueError:
99 return None
100
101 if self._min is not None and value < self._min:
102 return None
103
104 if self._max is not None and value > self._max:
105 return None
106
107 return value
108
109
110 class DateTimeConverter(BaseConverter):
111 """Converts a field value to a datetime.
112
113 Identifier: `dt`
114
115 Keyword Args:
116 format_string (str): String used to parse the field value
117 into a datetime. Any format recognized by strptime() is
118 supported (default ``'%Y-%m-%dT%H:%M:%SZ'``).
119 """
120
121 __slots__ = ('_format_string',)
122
123 def __init__(self, format_string='%Y-%m-%dT%H:%M:%SZ'):
124 self._format_string = format_string
125
126 def convert(self, value):
127 try:
128 return strptime(value, self._format_string)
129 except ValueError:
130 return None
131
132
133 class UUIDConverter(BaseConverter):
134 """Converts a field value to a uuid.UUID.
135
136 Identifier: `uuid`
137
138 In order to be converted, the field value must consist of a
139 string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.
140 Note, however, that hyphens and the URN prefix are optional.
141 """
142
143 def convert(self, value):
144 try:
145 return uuid.UUID(value)
146 except ValueError:
147 return None
148
149
150 class PathConverter(BaseConverter):
151 """Field converted used to match the rest of the path.
152
153 This field converter matches the remainder of the URL path,
154 returning it as a string.
155
156 This converter is currently supported only when used at the
157 end of the URL template.
158
159 The classic routing rules of falcon apply also to this converter:
160 considering the template ``'/foo/bar/{matched_path:path}'``, the path
161 ``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will
162 match, producing ``matched_path=''``, when
163 :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``
164 (the default), while it will *not* match when that option is ``True``.
165
166 (See also: :ref:`trailing_slash_in_path`)
167 """
168
169 CONSUME_MULTIPLE_SEGMENTS = True
170
171 def convert(self, value):
172 return '/'.join(value)
173
174
175 BUILTIN = (
176 ('int', IntConverter),
177 ('dt', DateTimeConverter),
178 ('uuid', UUIDConverter),
179 ('path', PathConverter),
180 )
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/routing/converters.py b/falcon/routing/converters.py
--- a/falcon/routing/converters.py
+++ b/falcon/routing/converters.py
@@ -14,6 +14,7 @@
import abc
from datetime import datetime
+from math import isfinite
import uuid
__all__ = (
@@ -21,6 +22,7 @@
'IntConverter',
'DateTimeConverter',
'UUIDConverter',
+ 'FloatConverter',
)
@@ -76,7 +78,6 @@
def __init__(self, num_digits=None, min=None, max=None):
if num_digits is not None and num_digits < 1:
raise ValueError('num_digits must be at least 1')
-
self._num_digits = num_digits
self._min = min
self._max = max
@@ -86,10 +87,10 @@
return None
# NOTE(kgriffs): int() will accept numbers with preceding or
- # trailing whitespace, so we need to do our own check. Using
- # strip() is faster than either a regex or a series of or'd
- # membership checks via "in", esp. as the length of contiguous
- # numbers in the value grows.
+ # trailing whitespace, so we need to do our own check. Using
+ # strip() is faster than either a regex or a series of or'd
+ # membership checks via "in", esp. as the length of contiguous
+ # numbers in the value grows.
if value.strip() != value:
return None
@@ -98,15 +99,52 @@
except ValueError:
return None
+ return self._validate_min_max_value(value)
+
+ def _validate_min_max_value(self, value):
if self._min is not None and value < self._min:
return None
-
if self._max is not None and value > self._max:
return None
return value
+class FloatConverter(IntConverter):
+ """Converts a field value to an float.
+
+ Identifier: `float`
+ Keyword Args:
+ min (float): Reject the value if it is less than this number.
+ max (float): Reject the value if it is greater than this number.
+ finite (bool) : Determines whether or not to only match ordinary
+ finite numbers (default: ``True``). Set to ``False`` to match
+ nan, inf, and -inf in addition to finite numbers.
+ """
+
+ __slots__ = '_finite'
+
+ def __init__(self, min: float = None, max: float = None, finite: bool = True):
+ self._min = min
+ self._max = max
+ self._finite = finite if finite is not None else True
+
+ def convert(self, value: str):
+ if value.strip() != value:
+ return None
+
+ try:
+ value = float(value)
+
+ if self._finite and not isfinite(value):
+ return None
+
+ except ValueError:
+ return None
+
+ return self._validate_min_max_value(value)
+
+
class DateTimeConverter(BaseConverter):
"""Converts a field value to a datetime.
@@ -176,5 +214,6 @@
('int', IntConverter),
('dt', DateTimeConverter),
('uuid', UUIDConverter),
+ ('float', FloatConverter),
('path', PathConverter),
)
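An illustrative check of the patched converter's behaviour (not part of the golden diff; it assumes the patch above is applied so that `FloatConverter` is importable from `falcon.routing.converters`, and the bounds used are arbitrary example values):

```python
from falcon.routing.converters import FloatConverter

c = FloatConverter(min=0.0, max=100.0)
print(c.convert("3.14"))   # 3.14
print(c.convert(" 3.14"))  # None -- surrounding whitespace is rejected
print(c.convert("inf"))    # None -- non-finite values rejected while finite=True (default)
print(c.convert("250.0"))  # None -- greater than max

loose = FloatConverter(finite=False)
print(loose.convert("-inf"))  # -inf, accepted once finite checking is disabled
```

With the `('float', FloatConverter)` entry added to `BUILTIN`, the converter is addressed by the `float` identifier in URI templates, mirroring how `int` resolves to `IntConverter`.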
| {"golden_diff": "diff --git a/falcon/routing/converters.py b/falcon/routing/converters.py\n--- a/falcon/routing/converters.py\n+++ b/falcon/routing/converters.py\n@@ -14,6 +14,7 @@\n \n import abc\n from datetime import datetime\n+from math import isfinite\n import uuid\n \n __all__ = (\n@@ -21,6 +22,7 @@\n 'IntConverter',\n 'DateTimeConverter',\n 'UUIDConverter',\n+ 'FloatConverter',\n )\n \n \n@@ -76,7 +78,6 @@\n def __init__(self, num_digits=None, min=None, max=None):\n if num_digits is not None and num_digits < 1:\n raise ValueError('num_digits must be at least 1')\n-\n self._num_digits = num_digits\n self._min = min\n self._max = max\n@@ -86,10 +87,10 @@\n return None\n \n # NOTE(kgriffs): int() will accept numbers with preceding or\n- # trailing whitespace, so we need to do our own check. Using\n- # strip() is faster than either a regex or a series of or'd\n- # membership checks via \"in\", esp. as the length of contiguous\n- # numbers in the value grows.\n+ # trailing whitespace, so we need to do our own check. Using\n+ # strip() is faster than either a regex or a series of or'd\n+ # membership checks via \"in\", esp. as the length of contiguous\n+ # numbers in the value grows.\n if value.strip() != value:\n return None\n \n@@ -98,15 +99,52 @@\n except ValueError:\n return None\n \n+ return self._validate_min_max_value(value)\n+\n+ def _validate_min_max_value(self, value):\n if self._min is not None and value < self._min:\n return None\n-\n if self._max is not None and value > self._max:\n return None\n \n return value\n \n \n+class FloatConverter(IntConverter):\n+ \"\"\"Converts a field value to an float.\n+\n+ Identifier: `float`\n+ Keyword Args:\n+ min (float): Reject the value if it is less than this number.\n+ max (float): Reject the value if it is greater than this number.\n+ finite (bool) : Determines whether or not to only match ordinary\n+ finite numbers (default: ``True``). Set to ``False`` to match\n+ nan, inf, and -inf in addition to finite numbers.\n+ \"\"\"\n+\n+ __slots__ = '_finite'\n+\n+ def __init__(self, min: float = None, max: float = None, finite: bool = True):\n+ self._min = min\n+ self._max = max\n+ self._finite = finite if finite is not None else True\n+\n+ def convert(self, value: str):\n+ if value.strip() != value:\n+ return None\n+\n+ try:\n+ value = float(value)\n+\n+ if self._finite and not isfinite(value):\n+ return None\n+\n+ except ValueError:\n+ return None\n+\n+ return self._validate_min_max_value(value)\n+\n+\n class DateTimeConverter(BaseConverter):\n \"\"\"Converts a field value to a datetime.\n \n@@ -176,5 +214,6 @@\n ('int', IntConverter),\n ('dt', DateTimeConverter),\n ('uuid', UUIDConverter),\n+ ('float', FloatConverter),\n ('path', PathConverter),\n )\n", "issue": "Implement FloatConverter\nImplement `FloatConverter` along the lines of [`IntConverter`](https://falcon.readthedocs.io/en/stable/api/routing.html#falcon.routing.IntConverter). 
Draw inspiration from `IntConverter`, or even find an efficient way to share code between the two!\r\n\r\nAdd the new converter to the list of [Built-in Converters](https://falcon.readthedocs.io/en/stable/api/routing.html#built-in-converters) under the `float` identifier.\r\n\r\nOpen questions: should we support converting `nan`, `inf` & `-inf` from path?\n", "before_files": [{"content": "# Copyright 2017 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom datetime import datetime\nimport uuid\n\n__all__ = (\n 'BaseConverter',\n 'IntConverter',\n 'DateTimeConverter',\n 'UUIDConverter',\n)\n\n\n# PERF(kgriffs): Avoid an extra namespace lookup when using this function\nstrptime = datetime.strptime\n\n\nclass BaseConverter(metaclass=abc.ABCMeta):\n \"\"\"Abstract base class for URI template field converters.\"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = False\n \"\"\"When set to ``True`` it indicates that this converter will consume\n multiple URL path segments. Currently a converter with\n ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template\n effectively meaning that it will consume all of the remaining URL path\n segments.\n \"\"\"\n\n @abc.abstractmethod # pragma: no cover\n def convert(self, value):\n \"\"\"Convert a URI template field value to another format or type.\n\n Args:\n value (str or List[str]): Original string to convert.\n If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a\n list of strings containing the path segments matched by\n the converter.\n\n Returns:\n object: Converted field value, or ``None`` if the field\n can not be converted.\n \"\"\"\n\n\ndef _consumes_multiple_segments(converter):\n return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)\n\n\nclass IntConverter(BaseConverter):\n \"\"\"Converts a field value to an int.\n\n Identifier: `int`\n\n Keyword Args:\n num_digits (int): Require the value to have the given\n number of digits.\n min (int): Reject the value if it is less than this number.\n max (int): Reject the value if it is greater than this number.\n \"\"\"\n\n __slots__ = ('_num_digits', '_min', '_max')\n\n def __init__(self, num_digits=None, min=None, max=None):\n if num_digits is not None and num_digits < 1:\n raise ValueError('num_digits must be at least 1')\n\n self._num_digits = num_digits\n self._min = min\n self._max = max\n\n def convert(self, value):\n if self._num_digits is not None and len(value) != self._num_digits:\n return None\n\n # NOTE(kgriffs): int() will accept numbers with preceding or\n # trailing whitespace, so we need to do our own check. Using\n # strip() is faster than either a regex or a series of or'd\n # membership checks via \"in\", esp. 
as the length of contiguous\n # numbers in the value grows.\n if value.strip() != value:\n return None\n\n try:\n value = int(value)\n except ValueError:\n return None\n\n if self._min is not None and value < self._min:\n return None\n\n if self._max is not None and value > self._max:\n return None\n\n return value\n\n\nclass DateTimeConverter(BaseConverter):\n \"\"\"Converts a field value to a datetime.\n\n Identifier: `dt`\n\n Keyword Args:\n format_string (str): String used to parse the field value\n into a datetime. Any format recognized by strptime() is\n supported (default ``'%Y-%m-%dT%H:%M:%SZ'``).\n \"\"\"\n\n __slots__ = ('_format_string',)\n\n def __init__(self, format_string='%Y-%m-%dT%H:%M:%SZ'):\n self._format_string = format_string\n\n def convert(self, value):\n try:\n return strptime(value, self._format_string)\n except ValueError:\n return None\n\n\nclass UUIDConverter(BaseConverter):\n \"\"\"Converts a field value to a uuid.UUID.\n\n Identifier: `uuid`\n\n In order to be converted, the field value must consist of a\n string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.\n Note, however, that hyphens and the URN prefix are optional.\n \"\"\"\n\n def convert(self, value):\n try:\n return uuid.UUID(value)\n except ValueError:\n return None\n\n\nclass PathConverter(BaseConverter):\n \"\"\"Field converted used to match the rest of the path.\n\n This field converter matches the remainder of the URL path,\n returning it as a string.\n\n This converter is currently supported only when used at the\n end of the URL template.\n\n The classic routing rules of falcon apply also to this converter:\n considering the template ``'/foo/bar/{matched_path:path}'``, the path\n ``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will\n match, producing ``matched_path=''``, when\n :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``\n (the default), while it will *not* match when that option is ``True``.\n\n (See also: :ref:`trailing_slash_in_path`)\n \"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = True\n\n def convert(self, value):\n return '/'.join(value)\n\n\nBUILTIN = (\n ('int', IntConverter),\n ('dt', DateTimeConverter),\n ('uuid', UUIDConverter),\n ('path', PathConverter),\n)\n", "path": "falcon/routing/converters.py"}], "after_files": [{"content": "# Copyright 2017 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom datetime import datetime\nfrom math import isfinite\nimport uuid\n\n__all__ = (\n 'BaseConverter',\n 'IntConverter',\n 'DateTimeConverter',\n 'UUIDConverter',\n 'FloatConverter',\n)\n\n\n# PERF(kgriffs): Avoid an extra namespace lookup when using this function\nstrptime = datetime.strptime\n\n\nclass BaseConverter(metaclass=abc.ABCMeta):\n \"\"\"Abstract base class for URI template field converters.\"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = False\n \"\"\"When set to ``True`` it indicates that this converter will consume\n multiple URL path segments. 
Currently a converter with\n ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template\n effectively meaning that it will consume all of the remaining URL path\n segments.\n \"\"\"\n\n @abc.abstractmethod # pragma: no cover\n def convert(self, value):\n \"\"\"Convert a URI template field value to another format or type.\n\n Args:\n value (str or List[str]): Original string to convert.\n If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a\n list of strings containing the path segments matched by\n the converter.\n\n Returns:\n object: Converted field value, or ``None`` if the field\n can not be converted.\n \"\"\"\n\n\ndef _consumes_multiple_segments(converter):\n return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)\n\n\nclass IntConverter(BaseConverter):\n \"\"\"Converts a field value to an int.\n\n Identifier: `int`\n\n Keyword Args:\n num_digits (int): Require the value to have the given\n number of digits.\n min (int): Reject the value if it is less than this number.\n max (int): Reject the value if it is greater than this number.\n \"\"\"\n\n __slots__ = ('_num_digits', '_min', '_max')\n\n def __init__(self, num_digits=None, min=None, max=None):\n if num_digits is not None and num_digits < 1:\n raise ValueError('num_digits must be at least 1')\n self._num_digits = num_digits\n self._min = min\n self._max = max\n\n def convert(self, value):\n if self._num_digits is not None and len(value) != self._num_digits:\n return None\n\n # NOTE(kgriffs): int() will accept numbers with preceding or\n # trailing whitespace, so we need to do our own check. Using\n # strip() is faster than either a regex or a series of or'd\n # membership checks via \"in\", esp. as the length of contiguous\n # numbers in the value grows.\n if value.strip() != value:\n return None\n\n try:\n value = int(value)\n except ValueError:\n return None\n\n return self._validate_min_max_value(value)\n\n def _validate_min_max_value(self, value):\n if self._min is not None and value < self._min:\n return None\n if self._max is not None and value > self._max:\n return None\n\n return value\n\n\nclass FloatConverter(IntConverter):\n \"\"\"Converts a field value to an float.\n\n Identifier: `float`\n Keyword Args:\n min (float): Reject the value if it is less than this number.\n max (float): Reject the value if it is greater than this number.\n finite (bool) : Determines whether or not to only match ordinary\n finite numbers (default: ``True``). Set to ``False`` to match\n nan, inf, and -inf in addition to finite numbers.\n \"\"\"\n\n __slots__ = '_finite'\n\n def __init__(self, min: float = None, max: float = None, finite: bool = True):\n self._min = min\n self._max = max\n self._finite = finite if finite is not None else True\n\n def convert(self, value: str):\n if value.strip() != value:\n return None\n\n try:\n value = float(value)\n\n if self._finite and not isfinite(value):\n return None\n\n except ValueError:\n return None\n\n return self._validate_min_max_value(value)\n\n\nclass DateTimeConverter(BaseConverter):\n \"\"\"Converts a field value to a datetime.\n\n Identifier: `dt`\n\n Keyword Args:\n format_string (str): String used to parse the field value\n into a datetime. 
Any format recognized by strptime() is\n supported (default ``'%Y-%m-%dT%H:%M:%SZ'``).\n \"\"\"\n\n __slots__ = ('_format_string',)\n\n def __init__(self, format_string='%Y-%m-%dT%H:%M:%SZ'):\n self._format_string = format_string\n\n def convert(self, value):\n try:\n return strptime(value, self._format_string)\n except ValueError:\n return None\n\n\nclass UUIDConverter(BaseConverter):\n \"\"\"Converts a field value to a uuid.UUID.\n\n Identifier: `uuid`\n\n In order to be converted, the field value must consist of a\n string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.\n Note, however, that hyphens and the URN prefix are optional.\n \"\"\"\n\n def convert(self, value):\n try:\n return uuid.UUID(value)\n except ValueError:\n return None\n\n\nclass PathConverter(BaseConverter):\n \"\"\"Field converted used to match the rest of the path.\n\n This field converter matches the remainder of the URL path,\n returning it as a string.\n\n This converter is currently supported only when used at the\n end of the URL template.\n\n The classic routing rules of falcon apply also to this converter:\n considering the template ``'/foo/bar/{matched_path:path}'``, the path\n ``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will\n match, producing ``matched_path=''``, when\n :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``\n (the default), while it will *not* match when that option is ``True``.\n\n (See also: :ref:`trailing_slash_in_path`)\n \"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = True\n\n def convert(self, value):\n return '/'.join(value)\n\n\nBUILTIN = (\n ('int', IntConverter),\n ('dt', DateTimeConverter),\n ('uuid', UUIDConverter),\n ('float', FloatConverter),\n ('path', PathConverter),\n)\n", "path": "falcon/routing/converters.py"}]} | 2,119 | 793 |
gh_patches_debug_3172 | rasdani/github-patches | git_diff | napalm-automation__napalm-1985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linting issue with napalm/base/validate.py:165:10 E721 do not compare types, for exact checks use `is` / `is not`, for instance checks use `isinstance()` [pycodestyle]
Last working build
```
Collecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))
Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)
```
failing build
```
Collecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))
Obtaining dependency information for pycodestyle from https://files.pythonhosted.org/packages/31/c2/e1508ed4395793f69e40fd8c6b5a690e1d568e649aae9492076a7b6befb4/pycodestyle-2.11.0-py2.py3-none-any.whl.metadata
Downloading pycodestyle-2.11.0-py2.py3-none-any.whl.metadata (4.5 kB)
```
Guessing we have a new linting rule in pycodestyle 2.11
_Originally posted by @bewing in https://github.com/napalm-automation/napalm/issues/1983#issuecomment-1660467649_
Will provide a fix shortly - please assign this one to me.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napalm/base/validate.py`
Content:
```
1 """
2 Validation methods for the NAPALM base.
3
4 See: https://napalm.readthedocs.io/en/latest/validate.html
5 """
6 import yaml
7 import copy
8 import re
9 from typing import Dict, List, Union, TypeVar, Optional, TYPE_CHECKING
10
11 if TYPE_CHECKING:
12 from napalm.base import NetworkDriver
13 from napalm.base.exceptions import ValidationException
14 from napalm.base import models
15
16
17 # We put it here to compile it only once
18 numeric_compare_regex = re.compile(r"^(<|>|<=|>=|==|!=)(\d+(\.\d+){0,1})$")
19
20
21 def _get_validation_file(validation_file: str) -> Dict[str, Dict]:
22 try:
23 with open(validation_file, "r") as stream:
24 try:
25 validation_source = yaml.safe_load(stream)
26 except yaml.YAMLError as exc:
27 raise ValidationException(exc)
28 except IOError:
29 raise ValidationException("File {0} not found.".format(validation_file))
30 return validation_source
31
32
33 def _mode(mode_string: str) -> Dict[str, bool]:
34 mode = {"strict": False}
35
36 for m in mode_string.split():
37 if m not in mode.keys():
38 raise ValidationException("mode '{}' not recognized".format(m))
39 mode[m] = True
40 return mode
41
42
43 def _compare_getter_list(
44 src: List, dst: List, mode: Dict[str, bool]
45 ) -> models.ListValidationResult:
46 result: models.ListValidationResult = {
47 "complies": True,
48 "present": [],
49 "missing": [],
50 "extra": [],
51 }
52 for src_element in src:
53 found = False
54
55 i = 0
56 while True:
57 try:
58 intermediate_match = compare(src_element, dst[i])
59 if (
60 isinstance(intermediate_match, dict)
61 and intermediate_match["complies"]
62 or not isinstance(intermediate_match, dict)
63 and intermediate_match
64 ):
65 found = True
66 result["present"].append(src_element)
67 dst.pop(i)
68 break
69 else:
70 i += 1
71 except IndexError:
72 break
73
74 if not found:
75 result["complies"] = False
76 result["missing"].append(src_element)
77
78 if mode["strict"] and dst:
79 result["extra"] = dst
80 result["complies"] = False
81
82 return result
83
84
85 def _compare_getter_dict(
86 src: Dict[str, List], dst: Dict[str, List], mode: Dict[str, bool]
87 ) -> models.DictValidationResult:
88 result: models.DictValidationResult = {
89 "complies": True,
90 "present": {},
91 "missing": [],
92 "extra": [],
93 }
94 dst = copy.deepcopy(dst) # Otherwise we are going to modify a "live" object
95
96 for key, src_element in src.items():
97 try:
98 dst_element = dst.pop(key)
99 result["present"][key] = {}
100 intermediate_result = compare(src_element, dst_element)
101
102 if isinstance(intermediate_result, dict):
103 nested = True
104
105 complies = intermediate_result["complies"]
106
107 if not complies:
108 result["present"][key]["diff"] = intermediate_result
109 else:
110 complies = intermediate_result
111 nested = False
112 if not complies:
113 result["present"][key]["expected_value"] = src_element
114 result["present"][key]["actual_value"] = dst_element
115
116 if not complies:
117 result["complies"] = False
118
119 result["present"][key]["complies"] = complies
120 result["present"][key]["nested"] = nested
121 except KeyError:
122 result["missing"].append(key)
123 result["complies"] = False
124
125 if mode["strict"] and dst:
126 result["extra"] = list(dst.keys())
127 result["complies"] = False
128
129 return result
130
131
132 CompareInput = TypeVar("CompareInput", str, Dict, List)
133
134
135 def compare(
136 src: CompareInput, dst: CompareInput
137 ) -> Union[bool, models.DictValidationResult, models.ListValidationResult]:
138 if isinstance(src, str):
139 src = str(src)
140
141 if isinstance(src, dict):
142 mode = _mode(src.pop("_mode", ""))
143 if "list" in src.keys():
144 if not isinstance(dst, list):
145 # This can happen with nested lists
146 return False
147
148 return _compare_getter_list(src["list"], dst, mode)
149 return _compare_getter_dict(src, dst, mode)
150
151 elif isinstance(src, str):
152 if src.startswith("<") or src.startswith(">"):
153 cmp_result = _compare_numeric(src, dst)
154 return cmp_result
155 elif "<->" in src and len(src.split("<->")) == 2:
156 cmp_result = _compare_range(src, dst)
157 return cmp_result
158 else:
159 m = re.search(src, str(dst))
160 if m:
161 return bool(m)
162 else:
163 return src == dst
164
165 elif type(src) == type(dst) == list:
166 pairs = zip(src, dst)
167 diff_lists = [
168 [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]
169 for x, y in pairs
170 if x != y
171 ]
172 return empty_tree(diff_lists)
173
174 else:
175 return src == dst
176
177
178 def _compare_numeric(src_num: str, dst_num: str) -> bool:
179 """Compare numerical values. You can use '<%d','>%d'."""
180 dst_num = float(dst_num)
181
182 match = numeric_compare_regex.match(src_num)
183 if not match:
184 error = "Failed numeric comparison. Collected: {}. Expected: {}".format(
185 dst_num, src_num
186 )
187 raise ValueError(error)
188
189 operand = {
190 "<": "__lt__",
191 ">": "__gt__",
192 ">=": "__ge__",
193 "<=": "__le__",
194 "==": "__eq__",
195 "!=": "__ne__",
196 }
197 return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))
198
199
200 def _compare_range(src_num: str, dst_num: str) -> bool:
201 """Compare value against a range of values. You can use '%d<->%d'."""
202 dst_num = float(dst_num)
203
204 match = src_num.split("<->")
205 if len(match) != 2:
206 error = "Failed range comparison. Collected: {}. Expected: {}".format(
207 dst_num, src_num
208 )
209 raise ValueError(error)
210
211 if float(match[0]) <= dst_num <= float(match[1]):
212 return True
213 else:
214 return False
215
216
217 def empty_tree(input_list: List) -> bool:
218 """Recursively iterate through values in nested lists."""
219 for item in input_list:
220 if not isinstance(item, list) or not empty_tree(item):
221 return False
222 return True
223
224
225 def compliance_report(
226 cls: "NetworkDriver",
227 validation_file: Optional[str] = None,
228 validation_source: Optional[str] = None,
229 ) -> models.ReportResult:
230 report: models.ReportResult = {} # type: ignore
231 if validation_file:
232 validation_source = _get_validation_file(validation_file) # type: ignore
233
234 # Otherwise we are going to modify a "live" object
235 validation_source = copy.deepcopy(validation_source)
236
237 assert isinstance(validation_source, list), validation_source
238
239 for validation_check in validation_source:
240 for getter, expected_results in validation_check.items():
241 if getter == "get_config":
242 # TBD
243 pass
244 else:
245 key = expected_results.pop("_name", "") or getter
246
247 try:
248 kwargs = expected_results.pop("_kwargs", {})
249 actual_results = getattr(cls, getter)(**kwargs)
250 report[key] = compare(expected_results, actual_results)
251 except NotImplementedError:
252 report[key] = {"skipped": True, "reason": "NotImplemented"}
253
254 complies = all([e.get("complies", True) for e in report.values()])
255 report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)]
256 report["complies"] = complies
257 return report
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napalm/base/validate.py b/napalm/base/validate.py
--- a/napalm/base/validate.py
+++ b/napalm/base/validate.py
@@ -162,7 +162,7 @@
else:
return src == dst
- elif type(src) == type(dst) == list:
+ elif isinstance(src, list) and isinstance(dst, list):
pairs = zip(src, dst)
diff_lists = [
[(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]
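For context (illustrative, not part of the patch): the newer pycodestyle release (2.11, per the issue) flags the chained exact type comparison on line 165 as E721. The sketch below contrasts the old and new checks; the only behavioural difference is that `isinstance` also accepts `list` subclasses, which should be harmless here since both the YAML-loaded validation source and the getter output are ordinarily plain lists.

```python
# Old form: chained exact type equality, flagged as E721 by newer pycodestyle.
def both_lists_old(src, dst):
    return type(src) == type(dst) == list


# New form: E721-clean, and also true for list subclasses.
def both_lists_new(src, dst):
    return isinstance(src, list) and isinstance(dst, list)


class TaggedList(list):
    """Hypothetical list subclass, used only to show the difference."""


a, b = TaggedList([1, 2]), [1, 2]
print(both_lists_old(a, b))  # False -- exact type check rejects the subclass
print(both_lists_new(a, b))  # True  -- isinstance accepts the subclass
```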
| {"golden_diff": "diff --git a/napalm/base/validate.py b/napalm/base/validate.py\n--- a/napalm/base/validate.py\n+++ b/napalm/base/validate.py\n@@ -162,7 +162,7 @@\n else:\n return src == dst\n \n- elif type(src) == type(dst) == list:\n+ elif isinstance(src, list) and isinstance(dst, list):\n pairs = zip(src, dst)\n diff_lists = [\n [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]\n", "issue": "Linting issue with napalm/base/validate.py:165:10 E721 do not compare types, for exact checks use `is` / `is not`, for instance checks use `isinstance()` [pycodestyle]\nLast working build\r\n```\r\nCollecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))\r\n Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)\r\n```\r\n\r\nfailing build\r\n```\r\nCollecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))\r\n Obtaining dependency information for pycodestyle from https://files.pythonhosted.org/packages/31/c2/e1508ed4395793f69e40fd8c6b5a690e1d568e649aae9492076a7b6befb4/pycodestyle-2.11.0-py2.py3-none-any.whl.metadata\r\n Downloading pycodestyle-2.11.0-py2.py3-none-any.whl.metadata (4.5 kB)\r\n```\r\nGuessing we have a new linting rule in pycodestyle 2.11\r\n\r\n_Originally posted by @bewing in https://github.com/napalm-automation/napalm/issues/1983#issuecomment-1660467649_\r\n\r\nWill provide a fix shortly - please assign this one to me.\n", "before_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nimport yaml\nimport copy\nimport re\nfrom typing import Dict, List, Union, TypeVar, Optional, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from napalm.base import NetworkDriver\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base import models\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file: str) -> Dict[str, Dict]:\n try:\n with open(validation_file, \"r\") as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string: str) -> Dict[str, bool]:\n mode = {\"strict\": False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(\n src: List, dst: List, mode: Dict[str, bool]\n) -> models.ListValidationResult:\n result: models.ListValidationResult = {\n \"complies\": True,\n \"present\": [],\n \"missing\": [],\n \"extra\": [],\n }\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if (\n isinstance(intermediate_match, dict)\n and intermediate_match[\"complies\"]\n or not isinstance(intermediate_match, dict)\n and intermediate_match\n ):\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(\n src: Dict[str, List], dst: Dict[str, List], mode: Dict[str, bool]\n) -> 
models.DictValidationResult:\n result: models.DictValidationResult = {\n \"complies\": True,\n \"present\": {},\n \"missing\": [],\n \"extra\": [],\n }\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key][\"diff\"] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\nCompareInput = TypeVar(\"CompareInput\", str, Dict, List)\n\n\ndef compare(\n src: CompareInput, dst: CompareInput\n) -> Union[bool, models.DictValidationResult, models.ListValidationResult]:\n if isinstance(src, str):\n src = str(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop(\"_mode\", \"\"))\n if \"list\" in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src[\"list\"], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, str):\n if src.startswith(\"<\") or src.startswith(\">\"):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n elif \"<->\" in src and len(src.split(\"<->\")) == 2:\n cmp_result = _compare_range(src, dst)\n return cmp_result\n else:\n m = re.search(src, str(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif type(src) == type(dst) == list:\n pairs = zip(src, dst)\n diff_lists = [\n [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]\n for x, y in pairs\n if x != y\n ]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef _compare_range(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare value against a range of values. You can use '%d<->%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = src_num.split(\"<->\")\n if len(match) != 2:\n error = \"Failed range comparison. Collected: {}. 
Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n if float(match[0]) <= dst_num <= float(match[1]):\n return True\n else:\n return False\n\n\ndef empty_tree(input_list: List) -> bool:\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(\n cls: \"NetworkDriver\",\n validation_file: Optional[str] = None,\n validation_source: Optional[str] = None,\n) -> models.ReportResult:\n report: models.ReportResult = {} # type: ignore\n if validation_file:\n validation_source = _get_validation_file(validation_file) # type: ignore\n\n # Otherwise we are going to modify a \"live\" object\n validation_source = copy.deepcopy(validation_source)\n\n assert isinstance(validation_source, list), validation_source\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop(\"_kwargs\", {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}], "after_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nimport yaml\nimport copy\nimport re\nfrom typing import Dict, List, Union, TypeVar, Optional, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from napalm.base import NetworkDriver\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base import models\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file: str) -> Dict[str, Dict]:\n try:\n with open(validation_file, \"r\") as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string: str) -> Dict[str, bool]:\n mode = {\"strict\": False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(\n src: List, dst: List, mode: Dict[str, bool]\n) -> models.ListValidationResult:\n result: models.ListValidationResult = {\n \"complies\": True,\n \"present\": [],\n \"missing\": [],\n \"extra\": [],\n }\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if (\n isinstance(intermediate_match, dict)\n and intermediate_match[\"complies\"]\n or not isinstance(intermediate_match, dict)\n and intermediate_match\n ):\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n 
result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(\n src: Dict[str, List], dst: Dict[str, List], mode: Dict[str, bool]\n) -> models.DictValidationResult:\n result: models.DictValidationResult = {\n \"complies\": True,\n \"present\": {},\n \"missing\": [],\n \"extra\": [],\n }\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key][\"diff\"] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\nCompareInput = TypeVar(\"CompareInput\", str, Dict, List)\n\n\ndef compare(\n src: CompareInput, dst: CompareInput\n) -> Union[bool, models.DictValidationResult, models.ListValidationResult]:\n if isinstance(src, str):\n src = str(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop(\"_mode\", \"\"))\n if \"list\" in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src[\"list\"], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, str):\n if src.startswith(\"<\") or src.startswith(\">\"):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n elif \"<->\" in src and len(src.split(\"<->\")) == 2:\n cmp_result = _compare_range(src, dst)\n return cmp_result\n else:\n m = re.search(src, str(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif isinstance(src, list) and isinstance(dst, list):\n pairs = zip(src, dst)\n diff_lists = [\n [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]\n for x, y in pairs\n if x != y\n ]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef _compare_range(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare value against a range of values. You can use '%d<->%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = src_num.split(\"<->\")\n if len(match) != 2:\n error = \"Failed range comparison. Collected: {}. 
Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n if float(match[0]) <= dst_num <= float(match[1]):\n return True\n else:\n return False\n\n\ndef empty_tree(input_list: List) -> bool:\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(\n cls: \"NetworkDriver\",\n validation_file: Optional[str] = None,\n validation_source: Optional[str] = None,\n) -> models.ReportResult:\n report: models.ReportResult = {} # type: ignore\n if validation_file:\n validation_source = _get_validation_file(validation_file) # type: ignore\n\n # Otherwise we are going to modify a \"live\" object\n validation_source = copy.deepcopy(validation_source)\n\n assert isinstance(validation_source, list), validation_source\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop(\"_kwargs\", {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}]} | 3,076 | 127 |
gh_patches_debug_29035 | rasdani/github-patches | git_diff | streamlit__streamlit-3495 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto-Complete Password with Password Manager
### Summary
When filling a username/password combo from a password manager, it only fills the username, not the password, because the HTML password field has the property `autocomplete="new-password"`. Removing this property results in the expected behaviour.
### Steps to reproduce
Code snippet:
```python
import streamlit as st
sender_email = st.text_input('username', value='user', key='email')
sender_password = st.text_input('password', type='password', key='password')
if st.button('Login'):
st.success('Logged in')
else:
st.write('Not logged in')
```
1. Run the code
2. Notice that 1Password 7 offers the saved password for the user field, but only suggests a new password for the password field
3. Fill saved password from the user field. Notice that the password field does not get filled.
4. Reload the page.
5. Remove the `autocomplete="new-password"` property with the web inspector.
6. Fill the password from the user field. Notice that both fields get filled (as expected).
**Expected behavior:**
The password manager fills both fields
**Actual behavior:**
The password manager only fills the user field.
### Is this a regression?
Don't know.
### Debug info
- Streamlit version: 0.79.0
- Python version: 3.9.2
- Using Conda? PipEnv? PyEnv? Pex? => Conda
- OS version: macOS 10.15.7
- Browser version: Tested with Safari 14.0.3 and Chrome 89.0.4389.90
Is there a workaround to remove this HTML property? Or does it require a code change within Streamlit?
For my tool the user needs to provide API authentication for the backend (username/password), which will be stored in a password manager (1Password 7 in this case). It is inconvenient having to copy the password manually.
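As a sketch of what an opt-out could look like (the `autocomplete` keyword below is hypothetical, since it is not part of the `st.text_input` API described in this report; `"current-password"` is a standard HTML autocomplete token):

```python
import streamlit as st

# Hypothetical keyword forwarded to the underlying <input> element, so the
# password manager treats the field as a saved credential rather than a new one.
sender_email = st.text_input('username', value='user', key='email')
sender_password = st.text_input(
    'password',
    type='password',
    autocomplete='current-password',  # instead of the hard-coded "new-password"
    key='password',
)
```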
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/text_widgets.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import cast
16
17 import streamlit
18 from streamlit.errors import StreamlitAPIException
19 from streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto
20 from streamlit.proto.TextInput_pb2 import TextInput as TextInputProto
21 from streamlit.state.widgets import register_widget
22 from .form import current_form_id
23 from .utils import check_callback_rules, check_session_state_rules
24
25
26 class TextWidgetsMixin:
27 def text_input(
28 self,
29 label,
30 value="",
31 max_chars=None,
32 key=None,
33 type="default",
34 help=None,
35 on_change=None,
36 args=None,
37 kwargs=None,
38 ):
39 """Display a single-line text input widget.
40
41 Parameters
42 ----------
43 label : str
44 A short label explaining to the user what this input is for.
45 value : any
46 The text value of this widget when it first renders. This will be
47 cast to str internally.
48 max_chars : int or None
49 Max number of characters allowed in text input.
50 key : str
51 An optional string to use as the unique key for the widget.
52 If this is omitted, a key will be generated for the widget
53 based on its content. Multiple widgets of the same type may
54 not share the same key.
55 type : str
56 The type of the text input. This can be either "default" (for
57 a regular text input), or "password" (for a text input that
58 masks the user's typed value). Defaults to "default".
59 help : str
60 An optional tooltip that gets displayed next to the input.
61 on_change : callable
62 An optional callback invoked when this text_input's value changes.
63 args : tuple
64 An optional tuple of args to pass to the callback.
65 kwargs : dict
66 An optional dict of kwargs to pass to the callback.
67
68 Returns
69 -------
70 str
71 The current value of the text input widget.
72
73 Example
74 -------
75 >>> title = st.text_input('Movie title', 'Life of Brian')
76 >>> st.write('The current movie title is', title)
77
78 """
79 check_callback_rules(self.dg, on_change)
80 check_session_state_rules(default_value=None if value == "" else value, key=key)
81
82 text_input_proto = TextInputProto()
83 text_input_proto.label = label
84 text_input_proto.default = str(value)
85 text_input_proto.form_id = current_form_id(self.dg)
86 if help is not None:
87 text_input_proto.help = help
88
89 if max_chars is not None:
90 text_input_proto.max_chars = max_chars
91
92 if type == "default":
93 text_input_proto.type = TextInputProto.DEFAULT
94 elif type == "password":
95 text_input_proto.type = TextInputProto.PASSWORD
96 else:
97 raise StreamlitAPIException(
98 "'%s' is not a valid text_input type. Valid types are 'default' and 'password'."
99 % type
100 )
101
102 def deserialize_text_input(ui_value) -> str:
103 return str(ui_value if ui_value is not None else value)
104
105 current_value, set_frontend_value = register_widget(
106 "text_input",
107 text_input_proto,
108 user_key=key,
109 on_change_handler=on_change,
110 args=args,
111 kwargs=kwargs,
112 deserializer=deserialize_text_input,
113 serializer=lambda x: x,
114 )
115
116 if set_frontend_value:
117 text_input_proto.value = current_value
118 text_input_proto.set_value = True
119
120 self.dg._enqueue("text_input", text_input_proto)
121 return current_value
122
123 def text_area(
124 self,
125 label,
126 value="",
127 height=None,
128 max_chars=None,
129 key=None,
130 help=None,
131 on_change=None,
132 args=None,
133 kwargs=None,
134 ):
135 """Display a multi-line text input widget.
136
137 Parameters
138 ----------
139 label : str
140 A short label explaining to the user what this input is for.
141 value : any
142 The text value of this widget when it first renders. This will be
143 cast to str internally.
144 height : int or None
145 Desired height of the UI element expressed in pixels. If None, a
146 default height is used.
147 max_chars : int or None
148 Maximum number of characters allowed in text area.
149 key : str
150 An optional string to use as the unique key for the widget.
151 If this is omitted, a key will be generated for the widget
152 based on its content. Multiple widgets of the same type may
153 not share the same key.
154 help : str
155 An optional tooltip that gets displayed next to the textarea.
156 on_change : callable
157 An optional callback invoked when this text_area's value changes.
158 args : tuple
159 An optional tuple of args to pass to the callback.
160 kwargs : dict
161 An optional dict of kwargs to pass to the callback.
162
163 Returns
164 -------
165 str
166 The current value of the text input widget.
167
168 Example
169 -------
170 >>> txt = st.text_area('Text to analyze', '''
171 ... It was the best of times, it was the worst of times, it was
172 ... the age of wisdom, it was the age of foolishness, it was
173 ... the epoch of belief, it was the epoch of incredulity, it
174 ... was the season of Light, it was the season of Darkness, it
175 ... was the spring of hope, it was the winter of despair, (...)
176 ... ''')
177 >>> st.write('Sentiment:', run_sentiment_analysis(txt))
178
179 """
180 check_callback_rules(self.dg, on_change)
181 check_session_state_rules(default_value=None if value == "" else value, key=key)
182
183 text_area_proto = TextAreaProto()
184 text_area_proto.label = label
185 text_area_proto.default = str(value)
186 text_area_proto.form_id = current_form_id(self.dg)
187 if help is not None:
188 text_area_proto.help = help
189
190 if height is not None:
191 text_area_proto.height = height
192
193 if max_chars is not None:
194 text_area_proto.max_chars = max_chars
195
196 def deserialize_text_area(ui_value) -> str:
197 return str(ui_value if ui_value is not None else value)
198
199 current_value, set_frontend_value = register_widget(
200 "text_area",
201 text_area_proto,
202 user_key=key,
203 on_change_handler=on_change,
204 args=args,
205 kwargs=kwargs,
206 deserializer=deserialize_text_area,
207 serializer=lambda x: x,
208 )
209
210 if set_frontend_value:
211 text_area_proto.value = current_value
212 text_area_proto.set_value = True
213
214 self.dg._enqueue("text_area", text_area_proto)
215 return current_value
216
217 @property
218 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
219 """Get our DeltaGenerator."""
220 return cast("streamlit.delta_generator.DeltaGenerator", self)
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/elements/text_widgets.py b/lib/streamlit/elements/text_widgets.py
--- a/lib/streamlit/elements/text_widgets.py
+++ b/lib/streamlit/elements/text_widgets.py
@@ -32,6 +32,7 @@
key=None,
type="default",
help=None,
+ autocomplete=None,
on_change=None,
args=None,
kwargs=None,
@@ -58,6 +59,11 @@
masks the user's typed value). Defaults to "default".
help : str
An optional tooltip that gets displayed next to the input.
+ autocomplete : str
+ An optional value that will be passed to the <input> element's
+ autocomplete property. If unspecified, this value will be set to
+ "new-password" for "password" inputs, and the empty string for
+ "default" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete
on_change : callable
An optional callback invoked when this text_input's value changes.
args : tuple
@@ -99,6 +105,12 @@
% type
)
+ # Marshall the autocomplete param. If unspecified, this will be
+ # set to "new-password" for password inputs.
+ if autocomplete is None:
+ autocomplete = "new-password" if type == "password" else ""
+ text_input_proto.autocomplete = autocomplete
+
def deserialize_text_input(ui_value) -> str:
return str(ui_value if ui_value is not None else value)
| {"golden_diff": "diff --git a/lib/streamlit/elements/text_widgets.py b/lib/streamlit/elements/text_widgets.py\n--- a/lib/streamlit/elements/text_widgets.py\n+++ b/lib/streamlit/elements/text_widgets.py\n@@ -32,6 +32,7 @@\n key=None,\n type=\"default\",\n help=None,\n+ autocomplete=None,\n on_change=None,\n args=None,\n kwargs=None,\n@@ -58,6 +59,11 @@\n masks the user's typed value). Defaults to \"default\".\n help : str\n An optional tooltip that gets displayed next to the input.\n+ autocomplete : str\n+ An optional value that will be passed to the <input> element's\n+ autocomplete property. If unspecified, this value will be set to\n+ \"new-password\" for \"password\" inputs, and the empty string for\n+ \"default\" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete\n on_change : callable\n An optional callback invoked when this text_input's value changes.\n args : tuple\n@@ -99,6 +105,12 @@\n % type\n )\n \n+ # Marshall the autocomplete param. If unspecified, this will be\n+ # set to \"new-password\" for password inputs.\n+ if autocomplete is None:\n+ autocomplete = \"new-password\" if type == \"password\" else \"\"\n+ text_input_proto.autocomplete = autocomplete\n+\n def deserialize_text_input(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n", "issue": "Auto-Complete Password with Password Manager\n### Summary\r\n\r\nWhen filling a username/password combo from a password manager, it only fills the username, not the password, because the HTML password field has the property `autocomplete=\"new-password\"`. Removing this property results in the expected behaviour.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```python\r\nimport streamlit as st\r\n\r\nsender_email = st.text_input('username', value='user', key='email')\r\nsender_password = st.text_input('password', type='password', key='password')\r\nif st.button('Login'):\r\n st.success('Logged in')\r\nelse:\r\n st.write('Not logged in')\r\n```\r\n\r\n1. Run the code\r\n2. Notice that 1Password 7 displays for the user field the saved password and for the password field only suggests a new password\r\n3. Fill saved password from the user field. Notice that the password field does not get filled.\r\n4. Reload the page.\r\n5. Remove the `autocomplete=\"new-password\"` property with the web inspector.\r\n6. Fill the password from the user field. Notice that both fields get filled (as expected).\r\n\r\n**Expected behavior:**\r\n\r\nThe password manager fills both fields\r\n\r\n**Actual behavior:**\r\n\r\nThe password manager only fills the user field.\r\n\r\n### Is this a regression?\r\n\r\nDon't know.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.79.0\r\n- Python version: 3.9.2\r\n- Using Conda? PipEnv? PyEnv? Pex? => Conda\r\n- OS version: macOS 10.15.7\r\n- Browser version: Tested with Safari 14.0.3 and Chrome 89.0.4389.90\r\n\r\n\r\n\r\nIs there a workaround to remove this HTML property? Or does it required a code change within Streamlit?\r\nFor my tool the user needs to provide a API authentication for the backend (username/password), which will be stored in a password manager (1Password 7) in this case. 
It is inconvenient having to copy the password manually.\r\n\r\nThank you!\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto\nfrom streamlit.proto.TextInput_pb2 import TextInput as TextInputProto\nfrom streamlit.state.widgets import register_widget\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass TextWidgetsMixin:\n def text_input(\n self,\n label,\n value=\"\",\n max_chars=None,\n key=None,\n type=\"default\",\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a single-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n max_chars : int or None\n Max number of characters allowed in text input.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n type : str\n The type of the text input. This can be either \"default\" (for\n a regular text input), or \"password\" (for a text input that\n masks the user's typed value). Defaults to \"default\".\n help : str\n An optional tooltip that gets displayed next to the input.\n on_change : callable\n An optional callback invoked when this text_input's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> title = st.text_input('Movie title', 'Life of Brian')\n >>> st.write('The current movie title is', title)\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_input_proto = TextInputProto()\n text_input_proto.label = label\n text_input_proto.default = str(value)\n text_input_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_input_proto.help = help\n\n if max_chars is not None:\n text_input_proto.max_chars = max_chars\n\n if type == \"default\":\n text_input_proto.type = TextInputProto.DEFAULT\n elif type == \"password\":\n text_input_proto.type = TextInputProto.PASSWORD\n else:\n raise StreamlitAPIException(\n \"'%s' is not a valid text_input type. 
Valid types are 'default' and 'password'.\"\n % type\n )\n\n def deserialize_text_input(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_input\",\n text_input_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_input,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_input_proto.value = current_value\n text_input_proto.set_value = True\n\n self.dg._enqueue(\"text_input\", text_input_proto)\n return current_value\n\n def text_area(\n self,\n label,\n value=\"\",\n height=None,\n max_chars=None,\n key=None,\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a multi-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n height : int or None\n Desired height of the UI element expressed in pixels. If None, a\n default height is used.\n max_chars : int or None\n Maximum number of characters allowed in text area.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the textarea.\n on_change : callable\n An optional callback invoked when this text_area's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> txt = st.text_area('Text to analyze', '''\n ... It was the best of times, it was the worst of times, it was\n ... the age of wisdom, it was the age of foolishness, it was\n ... the epoch of belief, it was the epoch of incredulity, it\n ... was the season of Light, it was the season of Darkness, it\n ... was the spring of hope, it was the winter of despair, (...)\n ... 
''')\n >>> st.write('Sentiment:', run_sentiment_analysis(txt))\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_area_proto = TextAreaProto()\n text_area_proto.label = label\n text_area_proto.default = str(value)\n text_area_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_area_proto.help = help\n\n if height is not None:\n text_area_proto.height = height\n\n if max_chars is not None:\n text_area_proto.max_chars = max_chars\n\n def deserialize_text_area(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_area\",\n text_area_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_area,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_area_proto.value = current_value\n text_area_proto.set_value = True\n\n self.dg._enqueue(\"text_area\", text_area_proto)\n return current_value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/text_widgets.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto\nfrom streamlit.proto.TextInput_pb2 import TextInput as TextInputProto\nfrom streamlit.state.widgets import register_widget\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass TextWidgetsMixin:\n def text_input(\n self,\n label,\n value=\"\",\n max_chars=None,\n key=None,\n type=\"default\",\n help=None,\n autocomplete=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a single-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n max_chars : int or None\n Max number of characters allowed in text input.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n type : str\n The type of the text input. This can be either \"default\" (for\n a regular text input), or \"password\" (for a text input that\n masks the user's typed value). Defaults to \"default\".\n help : str\n An optional tooltip that gets displayed next to the input.\n autocomplete : str\n An optional value that will be passed to the <input> element's\n autocomplete property. 
If unspecified, this value will be set to\n \"new-password\" for \"password\" inputs, and the empty string for\n \"default\" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete\n on_change : callable\n An optional callback invoked when this text_input's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> title = st.text_input('Movie title', 'Life of Brian')\n >>> st.write('The current movie title is', title)\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_input_proto = TextInputProto()\n text_input_proto.label = label\n text_input_proto.default = str(value)\n text_input_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_input_proto.help = help\n\n if max_chars is not None:\n text_input_proto.max_chars = max_chars\n\n if type == \"default\":\n text_input_proto.type = TextInputProto.DEFAULT\n elif type == \"password\":\n text_input_proto.type = TextInputProto.PASSWORD\n else:\n raise StreamlitAPIException(\n \"'%s' is not a valid text_input type. Valid types are 'default' and 'password'.\"\n % type\n )\n\n # Marshall the autocomplete param. If unspecified, this will be\n # set to \"new-password\" for password inputs.\n if autocomplete is None:\n autocomplete = \"new-password\" if type == \"password\" else \"\"\n text_input_proto.autocomplete = autocomplete\n\n def deserialize_text_input(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_input\",\n text_input_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_input,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_input_proto.value = current_value\n text_input_proto.set_value = True\n\n self.dg._enqueue(\"text_input\", text_input_proto)\n return current_value\n\n def text_area(\n self,\n label,\n value=\"\",\n height=None,\n max_chars=None,\n key=None,\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a multi-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n height : int or None\n Desired height of the UI element expressed in pixels. If None, a\n default height is used.\n max_chars : int or None\n Maximum number of characters allowed in text area.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the textarea.\n on_change : callable\n An optional callback invoked when this text_area's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> txt = st.text_area('Text to analyze', '''\n ... It was the best of times, it was the worst of times, it was\n ... 
the age of wisdom, it was the age of foolishness, it was\n ... the epoch of belief, it was the epoch of incredulity, it\n ... was the season of Light, it was the season of Darkness, it\n ... was the spring of hope, it was the winter of despair, (...)\n ... ''')\n >>> st.write('Sentiment:', run_sentiment_analysis(txt))\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_area_proto = TextAreaProto()\n text_area_proto.label = label\n text_area_proto.default = str(value)\n text_area_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_area_proto.help = help\n\n if height is not None:\n text_area_proto.height = height\n\n if max_chars is not None:\n text_area_proto.max_chars = max_chars\n\n def deserialize_text_area(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_area\",\n text_area_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_area,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_area_proto.value = current_value\n text_area_proto.set_value = True\n\n self.dg._enqueue(\"text_area\", text_area_proto)\n return current_value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/text_widgets.py"}]} | 2,868 | 346 |
gh_patches_debug_29113 | rasdani/github-patches | git_diff | oppia__oppia-8108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CircleCi tests are failing unexpectedly while downloading node modules
Screenshot:

Link:
https://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/setup.py`
Content:
```
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the 'License');
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an 'AS-IS' BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Python execution environent set up for all scripts."""
16
17 from __future__ import absolute_import # pylint: disable=import-only-modules
18 from __future__ import unicode_literals # pylint: disable=import-only-modules
19
20 import argparse
21 import os
22 import sys
23 import tarfile
24
25 import python_utils
26
27 from . import clean
28 from . import common
29
30 _PARSER = argparse.ArgumentParser(description="""
31 Python execution environent set up for all scripts.
32 """)
33
34
35 def create_directory(directory_path):
36 """Creates a new directory. Does not do anything if directory already
37 exists.
38
39 Args:
40 directory_path: str. Directory path to be created.
41 """
42 if os.path.exists(directory_path):
43 return
44 os.makedirs(directory_path)
45
46
47 # This function takes a command for python as its only input.
48 # It checks this input for a specific version of python and returns false
49 # if it does not match the expected prefix.
50 def test_python_version():
51 running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)
52 if running_python_version != '2.7':
53 python_utils.PRINT('Please use Python2.7. Exiting...')
54 # If OS is Windows, print helpful error message about adding Python to
55 # path.
56 os_info = os.uname()
57 if os_info[0] != 'Darwin' and os_info[0] != 'Linux':
58 common.print_each_string_after_two_new_lines([
59 'It looks like you are using Windows. If you have Python '
60 'installed,',
61 'make sure it is in your PATH and that PYTHONPATH is set.',
62 'If you have two versions of Python (ie, Python 2.7 and 3), '
63 'specify 2.7 before other versions of Python when setting the '
64 'PATH.',
65 'Here are some helpful articles:',
66 'http://docs.python-guide.org/en/latest/starting/install/win/',
67 'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'
68 'pythonpath-in-windows-7'])
69 # Exit when no suitable Python environment can be found.
70 raise Exception
71
72
73 def download_and_install_package(url_to_retrieve, filename):
74 """Downloads and installs package in Oppia tools directory.
75
76 Args:
77 url_to_retrieve: string. The url from which package is to be
78 downloaded.
79 filename: string. The name of the tar file.
80 """
81 python_utils.url_retrieve(url_to_retrieve, filename=filename)
82 tar = tarfile.open(name=filename)
83 tar.extractall(path=common.OPPIA_TOOLS_DIR)
84 tar.close()
85 os.remove(filename)
86
87
88 def main(args=None):
89 """Runs the script to setup Oppia."""
90 unused_parsed_args = _PARSER.parse_args(args=args)
91 test_python_version()
92
93 # The second option allows this script to also be run from deployment
94 # folders.
95 if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(
96 'deploy-'):
97 python_utils.PRINT('')
98 python_utils.PRINT(
99 'WARNING This script should be run from the oppia/ root folder.')
100 python_utils.PRINT('')
101 raise Exception
102
103 # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR. This
104 # is necessary becaue COMMON_DIR (or subsequent variables which refer to it)
105 # may use it in a situation where relative paths won't work as expected(such
106 # as $PYTHONPATH).
107 create_directory(common.OPPIA_TOOLS_DIR)
108 create_directory(common.THIRD_PARTY_DIR)
109 create_directory(common.NODE_MODULES_PATH)
110
111 os_info = os.uname()
112 if os_info[0] != 'Darwin' and os_info[0] != 'Linux':
113 # Node is a requirement for all installation scripts. Here, we check if
114 # the OS supports node.js installation; if not, we exit with an error.
115 common.print_each_string_after_two_new_lines([
116 'WARNING: Unsupported OS for installation of node.js.',
117 'If you are running this script on Windows, see the instructions',
118 'here regarding installation of node.js:',
119 'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'
120 '%29',
121 'STATUS: Installation completed except for node.js. Exiting.'])
122 raise Exception
123
124 # Download and install node.js.
125 python_utils.PRINT(
126 'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)
127 if not os.path.exists(common.NODE_PATH):
128 python_utils.PRINT('Installing Node.js')
129 if os_info[0] == 'Darwin':
130 if os_info[4] == 'x86_64':
131 node_file_name = 'node-v10.15.3-darwin-x64'
132 else:
133 node_file_name = 'node-v10.15.3-darwin-x86'
134 elif os_info[0] == 'Linux':
135 if os_info[4] == 'x86_64':
136 node_file_name = 'node-v10.15.3-linux-x64'
137 else:
138 node_file_name = 'node-v10.15.3-linux-x86'
139
140 download_and_install_package(
141 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,
142 'node-download.tgz')
143 os.rename(
144 os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
145 common.NODE_PATH)
146
147 # Change ownership of node_modules.
148 # Note: on some machines, these commands seem to take quite a long time.
149 common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
150 common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)
151
152 # Download and install yarn.
153 python_utils.PRINT(
154 'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)
155 if not os.path.exists(common.YARN_PATH):
156 python_utils.PRINT('Removing package-lock.json')
157 clean.delete_file('package-lock.json')
158 common.print_each_string_after_two_new_lines([
159 'Installing yarn',
160 'WARNING: Please note that Oppia uses Yarn to manage node packages',
161 'do *NOT* use npm. For more information on how to use yarn,',
162 'visit https://yarnpkg.com/en/docs/usage.'])
163
164 # NB: Update .yarnrc if the yarn version below is changed.
165 yarn_version = 'v1.17.3'
166 yarn_file_name = 'yarn-%s.tar.gz' % yarn_version
167 download_and_install_package(
168 'https://github.com/yarnpkg/yarn/releases/download/%s/%s'
169 % (yarn_version, yarn_file_name), yarn_file_name)
170
171 # Adjust path to support the default Chrome locations for Unix, Windows and
172 # Mac OS.
173 if os.environ.get('TRAVIS'):
174 chrome_bin = '/usr/bin/chromium-browser'
175 elif os.path.isfile('/usr/bin/google-chrome'):
176 # Unix.
177 chrome_bin = '/usr/bin/google-chrome'
178 elif os.path.isfile('/usr/bin/chromium-browser'):
179 # Unix.
180 chrome_bin = '/usr/bin/chromium-browser'
181 elif os.path.isfile(
182 '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
183 # Windows.
184 chrome_bin = (
185 '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
186 elif os.path.isfile(
187 '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
188 # WSL.
189 chrome_bin = (
190 '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
191 elif os.path.isfile(
192 '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):
193 # Mac OS.
194 chrome_bin = (
195 '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
196 else:
197 python_utils.PRINT('Chrome is not found, stopping ...')
198 raise Exception
199
200 os.environ['CHROME_BIN'] = chrome_bin
201 python_utils.PRINT('Environment setup completed.')
202
203
204 # The 'no coverage' pragma is used as this line is un-testable. This is because
205 # it will only be called when setup.py is used as a script.
206 if __name__ == '__main__': # pragma: no cover
207 main()
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/setup.py b/scripts/setup.py
--- a/scripts/setup.py
+++ b/scripts/setup.py
@@ -19,6 +19,7 @@
import argparse
import os
+import subprocess
import sys
import tarfile
@@ -130,12 +131,12 @@
if os_info[4] == 'x86_64':
node_file_name = 'node-v10.15.3-darwin-x64'
else:
- node_file_name = 'node-v10.15.3-darwin-x86'
+ node_file_name = 'node-v10.15.3'
elif os_info[0] == 'Linux':
if os_info[4] == 'x86_64':
node_file_name = 'node-v10.15.3-linux-x64'
else:
- node_file_name = 'node-v10.15.3-linux-x86'
+ node_file_name = 'node-v10.15.3'
download_and_install_package(
'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,
@@ -144,6 +145,11 @@
os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
common.NODE_PATH)
+ if node_file_name == 'node-v10.15.3':
+ with common.CD(common.NODE_PATH):
+ subprocess.check_call(['./configure'])
+ subprocess.check_call(['make'])
+
# Change ownership of node_modules.
# Note: on some machines, these commands seem to take quite a long time.
common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
| {"golden_diff": "diff --git a/scripts/setup.py b/scripts/setup.py\n--- a/scripts/setup.py\n+++ b/scripts/setup.py\n@@ -19,6 +19,7 @@\n \n import argparse\n import os\n+import subprocess\n import sys\n import tarfile\n \n@@ -130,12 +131,12 @@\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-darwin-x64'\n else:\n- node_file_name = 'node-v10.15.3-darwin-x86'\n+ node_file_name = 'node-v10.15.3'\n elif os_info[0] == 'Linux':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-linux-x64'\n else:\n- node_file_name = 'node-v10.15.3-linux-x86'\n+ node_file_name = 'node-v10.15.3'\n \n download_and_install_package(\n 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,\n@@ -144,6 +145,11 @@\n os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),\n common.NODE_PATH)\n \n+ if node_file_name == 'node-v10.15.3':\n+ with common.CD(common.NODE_PATH):\n+ subprocess.check_call(['./configure'])\n+ subprocess.check_call(['make'])\n+\n # Change ownership of node_modules.\n # Note: on some machines, these commands seem to take quite a long time.\n common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)\n", "issue": "CircleCi tests are failing unexpectedly while downloading node modules\nScreenshot:\r\n\r\n\r\nLink:\r\nhttps://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link\r\n\nCircleCi tests are failing unexpectedly while downloading node modules\nScreenshot:\r\n\r\n\r\nLink:\r\nhttps://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link\r\n\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS-IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python execution environent set up for all scripts.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport os\nimport sys\nimport tarfile\n\nimport python_utils\n\nfrom . import clean\nfrom . import common\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nPython execution environent set up for all scripts.\n\"\"\")\n\n\ndef create_directory(directory_path):\n \"\"\"Creates a new directory. Does not do anything if directory already\n exists.\n\n Args:\n directory_path: str. Directory path to be created.\n \"\"\"\n if os.path.exists(directory_path):\n return\n os.makedirs(directory_path)\n\n\n# This function takes a command for python as its only input.\n# It checks this input for a specific version of python and returns false\n# if it does not match the expected prefix.\ndef test_python_version():\n running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)\n if running_python_version != '2.7':\n python_utils.PRINT('Please use Python2.7. 
Exiting...')\n # If OS is Windows, print helpful error message about adding Python to\n # path.\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n common.print_each_string_after_two_new_lines([\n 'It looks like you are using Windows. If you have Python '\n 'installed,',\n 'make sure it is in your PATH and that PYTHONPATH is set.',\n 'If you have two versions of Python (ie, Python 2.7 and 3), '\n 'specify 2.7 before other versions of Python when setting the '\n 'PATH.',\n 'Here are some helpful articles:',\n 'http://docs.python-guide.org/en/latest/starting/install/win/',\n 'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'\n 'pythonpath-in-windows-7'])\n # Exit when no suitable Python environment can be found.\n raise Exception\n\n\ndef download_and_install_package(url_to_retrieve, filename):\n \"\"\"Downloads and installs package in Oppia tools directory.\n\n Args:\n url_to_retrieve: string. The url from which package is to be\n downloaded.\n filename: string. The name of the tar file.\n \"\"\"\n python_utils.url_retrieve(url_to_retrieve, filename=filename)\n tar = tarfile.open(name=filename)\n tar.extractall(path=common.OPPIA_TOOLS_DIR)\n tar.close()\n os.remove(filename)\n\n\ndef main(args=None):\n \"\"\"Runs the script to setup Oppia.\"\"\"\n unused_parsed_args = _PARSER.parse_args(args=args)\n test_python_version()\n\n # The second option allows this script to also be run from deployment\n # folders.\n if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(\n 'deploy-'):\n python_utils.PRINT('')\n python_utils.PRINT(\n 'WARNING This script should be run from the oppia/ root folder.')\n python_utils.PRINT('')\n raise Exception\n\n # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR. This\n # is necessary becaue COMMON_DIR (or subsequent variables which refer to it)\n # may use it in a situation where relative paths won't work as expected(such\n # as $PYTHONPATH).\n create_directory(common.OPPIA_TOOLS_DIR)\n create_directory(common.THIRD_PARTY_DIR)\n create_directory(common.NODE_MODULES_PATH)\n\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n # Node is a requirement for all installation scripts. Here, we check if\n # the OS supports node.js installation; if not, we exit with an error.\n common.print_each_string_after_two_new_lines([\n 'WARNING: Unsupported OS for installation of node.js.',\n 'If you are running this script on Windows, see the instructions',\n 'here regarding installation of node.js:',\n 'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'\n '%29',\n 'STATUS: Installation completed except for node.js. 
Exiting.'])\n raise Exception\n\n # Download and install node.js.\n python_utils.PRINT(\n 'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.NODE_PATH):\n python_utils.PRINT('Installing Node.js')\n if os_info[0] == 'Darwin':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-darwin-x64'\n else:\n node_file_name = 'node-v10.15.3-darwin-x86'\n elif os_info[0] == 'Linux':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-linux-x64'\n else:\n node_file_name = 'node-v10.15.3-linux-x86'\n\n download_and_install_package(\n 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,\n 'node-download.tgz')\n os.rename(\n os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),\n common.NODE_PATH)\n\n # Change ownership of node_modules.\n # Note: on some machines, these commands seem to take quite a long time.\n common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)\n common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)\n\n # Download and install yarn.\n python_utils.PRINT(\n 'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.YARN_PATH):\n python_utils.PRINT('Removing package-lock.json')\n clean.delete_file('package-lock.json')\n common.print_each_string_after_two_new_lines([\n 'Installing yarn',\n 'WARNING: Please note that Oppia uses Yarn to manage node packages',\n 'do *NOT* use npm. For more information on how to use yarn,',\n 'visit https://yarnpkg.com/en/docs/usage.'])\n\n # NB: Update .yarnrc if the yarn version below is changed.\n yarn_version = 'v1.17.3'\n yarn_file_name = 'yarn-%s.tar.gz' % yarn_version\n download_and_install_package(\n 'https://github.com/yarnpkg/yarn/releases/download/%s/%s'\n % (yarn_version, yarn_file_name), yarn_file_name)\n\n # Adjust path to support the default Chrome locations for Unix, Windows and\n # Mac OS.\n if os.environ.get('TRAVIS'):\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile('/usr/bin/google-chrome'):\n # Unix.\n chrome_bin = '/usr/bin/google-chrome'\n elif os.path.isfile('/usr/bin/chromium-browser'):\n # Unix.\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile(\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # Windows.\n chrome_bin = (\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # WSL.\n chrome_bin = (\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):\n # Mac OS.\n chrome_bin = (\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')\n else:\n python_utils.PRINT('Chrome is not found, stopping ...')\n raise Exception\n\n os.environ['CHROME_BIN'] = chrome_bin\n python_utils.PRINT('Environment setup completed.')\n\n\n# The 'no coverage' pragma is used as this line is un-testable. This is because\n# it will only be called when setup.py is used as a script.\nif __name__ == '__main__': # pragma: no cover\n main()\n", "path": "scripts/setup.py"}], "after_files": [{"content": "# Copyright 2019 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS-IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python execution environent set up for all scripts.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\nimport python_utils\n\nfrom . import clean\nfrom . import common\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nPython execution environent set up for all scripts.\n\"\"\")\n\n\ndef create_directory(directory_path):\n \"\"\"Creates a new directory. Does not do anything if directory already\n exists.\n\n Args:\n directory_path: str. Directory path to be created.\n \"\"\"\n if os.path.exists(directory_path):\n return\n os.makedirs(directory_path)\n\n\n# This function takes a command for python as its only input.\n# It checks this input for a specific version of python and returns false\n# if it does not match the expected prefix.\ndef test_python_version():\n running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)\n if running_python_version != '2.7':\n python_utils.PRINT('Please use Python2.7. Exiting...')\n # If OS is Windows, print helpful error message about adding Python to\n # path.\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n common.print_each_string_after_two_new_lines([\n 'It looks like you are using Windows. If you have Python '\n 'installed,',\n 'make sure it is in your PATH and that PYTHONPATH is set.',\n 'If you have two versions of Python (ie, Python 2.7 and 3), '\n 'specify 2.7 before other versions of Python when setting the '\n 'PATH.',\n 'Here are some helpful articles:',\n 'http://docs.python-guide.org/en/latest/starting/install/win/',\n 'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'\n 'pythonpath-in-windows-7'])\n # Exit when no suitable Python environment can be found.\n raise Exception\n\n\ndef download_and_install_package(url_to_retrieve, filename):\n \"\"\"Downloads and installs package in Oppia tools directory.\n\n Args:\n url_to_retrieve: string. The url from which package is to be\n downloaded.\n filename: string. The name of the tar file.\n \"\"\"\n python_utils.url_retrieve(url_to_retrieve, filename=filename)\n tar = tarfile.open(name=filename)\n tar.extractall(path=common.OPPIA_TOOLS_DIR)\n tar.close()\n os.remove(filename)\n\n\ndef main(args=None):\n \"\"\"Runs the script to setup Oppia.\"\"\"\n unused_parsed_args = _PARSER.parse_args(args=args)\n test_python_version()\n\n # The second option allows this script to also be run from deployment\n # folders.\n if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(\n 'deploy-'):\n python_utils.PRINT('')\n python_utils.PRINT(\n 'WARNING This script should be run from the oppia/ root folder.')\n python_utils.PRINT('')\n raise Exception\n\n # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR. 
This\n # is necessary becaue COMMON_DIR (or subsequent variables which refer to it)\n # may use it in a situation where relative paths won't work as expected(such\n # as $PYTHONPATH).\n create_directory(common.OPPIA_TOOLS_DIR)\n create_directory(common.THIRD_PARTY_DIR)\n create_directory(common.NODE_MODULES_PATH)\n\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n # Node is a requirement for all installation scripts. Here, we check if\n # the OS supports node.js installation; if not, we exit with an error.\n common.print_each_string_after_two_new_lines([\n 'WARNING: Unsupported OS for installation of node.js.',\n 'If you are running this script on Windows, see the instructions',\n 'here regarding installation of node.js:',\n 'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'\n '%29',\n 'STATUS: Installation completed except for node.js. Exiting.'])\n raise Exception\n\n # Download and install node.js.\n python_utils.PRINT(\n 'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.NODE_PATH):\n python_utils.PRINT('Installing Node.js')\n if os_info[0] == 'Darwin':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-darwin-x64'\n else:\n node_file_name = 'node-v10.15.3'\n elif os_info[0] == 'Linux':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-linux-x64'\n else:\n node_file_name = 'node-v10.15.3'\n\n download_and_install_package(\n 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,\n 'node-download.tgz')\n os.rename(\n os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),\n common.NODE_PATH)\n\n if node_file_name == 'node-v10.15.3':\n with common.CD(common.NODE_PATH):\n subprocess.check_call(['./configure'])\n subprocess.check_call(['make'])\n\n # Change ownership of node_modules.\n # Note: on some machines, these commands seem to take quite a long time.\n common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)\n common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)\n\n # Download and install yarn.\n python_utils.PRINT(\n 'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.YARN_PATH):\n python_utils.PRINT('Removing package-lock.json')\n clean.delete_file('package-lock.json')\n common.print_each_string_after_two_new_lines([\n 'Installing yarn',\n 'WARNING: Please note that Oppia uses Yarn to manage node packages',\n 'do *NOT* use npm. 
For more information on how to use yarn,',\n 'visit https://yarnpkg.com/en/docs/usage.'])\n\n # NB: Update .yarnrc if the yarn version below is changed.\n yarn_version = 'v1.17.3'\n yarn_file_name = 'yarn-%s.tar.gz' % yarn_version\n download_and_install_package(\n 'https://github.com/yarnpkg/yarn/releases/download/%s/%s'\n % (yarn_version, yarn_file_name), yarn_file_name)\n\n # Adjust path to support the default Chrome locations for Unix, Windows and\n # Mac OS.\n if os.environ.get('TRAVIS'):\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile('/usr/bin/google-chrome'):\n # Unix.\n chrome_bin = '/usr/bin/google-chrome'\n elif os.path.isfile('/usr/bin/chromium-browser'):\n # Unix.\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile(\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # Windows.\n chrome_bin = (\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # WSL.\n chrome_bin = (\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):\n # Mac OS.\n chrome_bin = (\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')\n else:\n python_utils.PRINT('Chrome is not found, stopping ...')\n raise Exception\n\n os.environ['CHROME_BIN'] = chrome_bin\n python_utils.PRINT('Environment setup completed.')\n\n\n# The 'no coverage' pragma is used as this line is un-testable. This is because\n# it will only be called when setup.py is used as a script.\nif __name__ == '__main__': # pragma: no cover\n main()\n", "path": "scripts/setup.py"}]} | 3,017 | 399 |
gh_patches_debug_3537 | rasdani/github-patches | git_diff | ethereum__web3.py-1334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Python `collections` Deprecation warnings
* Python: 3.7 and below
### What was wrong?
Python 3.8 is changing the way imports from `collections` are being handled. The following Deprecation warning describes the issue:
`DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working`
### How can it be fixed?
Go through the codebase and swap out any `collections` imports for `collections.abc`
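For example, the module-level import at the top of `web3/_utils/formatters.py` (shown below) would change roughly like this; note that concrete containers such as `OrderedDict` still come from `collections`, only the abstract base classes move:

```python
# Before: triggers the DeprecationWarning quoted above
from collections import Iterable, Mapping

# After: import the ABCs from collections.abc instead
from collections.abc import Iterable, Mapping
```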
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/_utils/formatters.py`
Content:
```
1 from collections import (
2 Iterable,
3 Mapping,
4 )
5
6 from eth_utils import (
7 is_dict,
8 is_list_like,
9 is_string,
10 to_dict,
11 to_list,
12 )
13
14 from web3._utils.decorators import (
15 reject_recursive_repeats,
16 )
17 from web3._utils.toolz import (
18 compose,
19 curry,
20 dissoc,
21 )
22
23
24 def hex_to_integer(value):
25 return int(value, 16)
26
27
28 integer_to_hex = hex
29
30
31 @curry
32 @to_list
33 def apply_formatter_at_index(formatter, at_index, value):
34 if at_index + 1 > len(value):
35 raise IndexError(
36 "Not enough values in iterable to apply formatter. Got: {0}. "
37 "Need: {1}".format(len(value), at_index + 1)
38 )
39 for index, item in enumerate(value):
40 if index == at_index:
41 yield formatter(item)
42 else:
43 yield item
44
45
46 def apply_formatters_to_args(*formatters):
47 return compose(*(
48 apply_formatter_at_index(formatter, index)
49 for index, formatter
50 in enumerate(formatters)
51 ))
52
53
54 @curry
55 def apply_formatter_if(condition, formatter, value):
56 if condition(value):
57 return formatter(value)
58 else:
59 return value
60
61
62 @curry
63 @to_dict
64 def apply_formatters_to_dict(formatters, value):
65 for key, item in value.items():
66 if key in formatters:
67 try:
68 yield key, formatters[key](item)
69 except (TypeError, ValueError) as exc:
70 raise type(exc)("Could not format value %r as field %r" % (item, key)) from exc
71 else:
72 yield key, item
73
74
75 @curry
76 @to_list
77 def apply_formatter_to_array(formatter, value):
78 for item in value:
79 yield formatter(item)
80
81
82 @curry
83 def apply_one_of_formatters(formatter_condition_pairs, value):
84 for formatter, condition in formatter_condition_pairs:
85 if condition(value):
86 return formatter(value)
87 else:
88 raise ValueError("The provided value did not satisfy any of the formatter conditions")
89
90
91 def map_collection(func, collection):
92 """
93 Apply func to each element of a collection, or value of a dictionary.
94 If the value is not a collection, return it unmodified
95 """
96 datatype = type(collection)
97 if isinstance(collection, Mapping):
98 return datatype((key, func(val)) for key, val in collection.items())
99 if is_string(collection):
100 return collection
101 elif isinstance(collection, Iterable):
102 return datatype(map(func, collection))
103 else:
104 return collection
105
106
107 @reject_recursive_repeats
108 def recursive_map(func, data):
109 """
110 Apply func to data, and any collection items inside data (using map_collection).
111 Define func so that it only applies to the type of value that you want it to apply to.
112 """
113 def recurse(item):
114 return recursive_map(func, item)
115 items_mapped = map_collection(recurse, data)
116 return func(items_mapped)
117
118
119 def static_return(value):
120 def inner(*args, **kwargs):
121 return value
122 return inner
123
124
125 def static_result(value):
126 def inner(*args, **kwargs):
127 return {'result': value}
128 return inner
129
130
131 @curry
132 @to_dict
133 def apply_key_map(key_mappings, value):
134 for key, item in value.items():
135 if key in key_mappings:
136 yield key_mappings[key], item
137 else:
138 yield key, item
139
140
141 def is_array_of_strings(value):
142 if not is_list_like(value):
143 return False
144 return all((is_string(item) for item in value))
145
146
147 def is_array_of_dicts(value):
148 if not is_list_like(value):
149 return False
150 return all((is_dict(item) for item in value))
151
152
153 @curry
154 def remove_key_if(key, remove_if, input_dict):
155 if key in input_dict and remove_if(input_dict):
156 return dissoc(input_dict, key)
157 else:
158 return input_dict
159
```
Path: `web3/datastructures.py`
Content:
```
1 from collections import (
2 Hashable,
3 Mapping,
4 MutableMapping,
5 OrderedDict,
6 Sequence,
7 )
8
9 from eth_utils import (
10 is_integer,
11 )
12
13 from web3._utils.formatters import (
14 recursive_map,
15 )
16
17 # Hashable must be immutable:
18 # "the implementation of hashable collections requires that a key's hash value is immutable"
19 # https://docs.python.org/3/reference/datamodel.html#object.__hash__
20
21
22 class ReadableAttributeDict(Mapping):
23 """
24 The read attributes for the AttributeDict types
25 """
26
27 def __init__(self, dictionary, *args, **kwargs):
28 self.__dict__ = dict(dictionary)
29 self.__dict__.update(dict(*args, **kwargs))
30
31 def __getitem__(self, key):
32 return self.__dict__[key]
33
34 def __iter__(self):
35 return iter(self.__dict__)
36
37 def __len__(self):
38 return len(self.__dict__)
39
40 def __repr__(self):
41 return self.__class__.__name__ + "(%r)" % self.__dict__
42
43 def _repr_pretty_(self, builder, cycle):
44 """
45 Custom pretty output for the IPython console
46 """
47 builder.text(self.__class__.__name__ + "(")
48 if cycle:
49 builder.text("<cycle>")
50 else:
51 builder.pretty(self.__dict__)
52 builder.text(")")
53
54 @classmethod
55 def _apply_if_mapping(cls, value):
56 if isinstance(value, Mapping):
57 return cls(value)
58 else:
59 return value
60
61 @classmethod
62 def recursive(cls, value):
63 return recursive_map(cls._apply_if_mapping, value)
64
65
66 class MutableAttributeDict(MutableMapping, ReadableAttributeDict):
67
68 def __setitem__(self, key, val):
69 self.__dict__[key] = val
70
71 def __delitem__(self, key):
72 del self.__dict__[key]
73
74
75 class AttributeDict(ReadableAttributeDict, Hashable):
76 """
77 This provides superficial immutability, someone could hack around it
78 """
79
80 def __setattr__(self, attr, val):
81 if attr == '__dict__':
82 super().__setattr__(attr, val)
83 else:
84 raise TypeError('This data is immutable -- create a copy instead of modifying')
85
86 def __delattr__(self, key):
87 raise TypeError('This data is immutable -- create a copy instead of modifying')
88
89 def __hash__(self):
90 return hash(tuple(sorted(self.items())))
91
92 def __eq__(self, other):
93 if isinstance(other, Mapping):
94 return self.__dict__ == dict(other)
95 else:
96 return False
97
98
99 class NamedElementOnion(Mapping):
100 """
101 Add layers to an onion-shaped structure. Optionally, inject to a specific layer.
102 This structure is iterable, where the outermost layer is first, and innermost is last.
103 """
104
105 def __init__(self, init_elements, valid_element=callable):
106 self._queue = OrderedDict()
107 for element in reversed(init_elements):
108 if valid_element(element):
109 self.add(element)
110 else:
111 self.add(*element)
112
113 def add(self, element, name=None):
114 if name is None:
115 name = element
116
117 if name in self._queue:
118 if name is element:
119 raise ValueError("You can't add the same un-named instance twice")
120 else:
121 raise ValueError("You can't add the same name again, use replace instead")
122
123 self._queue[name] = element
124
125 def inject(self, element, name=None, layer=None):
126 """
127 Inject a named element to an arbitrary layer in the onion.
128
129 The current implementation only supports insertion at the innermost layer,
130 or at the outermost layer. Note that inserting to the outermost is equivalent
131 to calling :meth:`add` .
132 """
133 if not is_integer(layer):
134 raise TypeError("The layer for insertion must be an int.")
135 elif layer != 0 and layer != len(self._queue):
136 raise NotImplementedError(
137 "You can only insert to the beginning or end of a %s, currently. "
138 "You tried to insert to %d, but only 0 and %d are permitted. " % (
139 type(self),
140 layer,
141 len(self._queue),
142 )
143 )
144
145 self.add(element, name=name)
146
147 if layer == 0:
148 if name is None:
149 name = element
150 self._queue.move_to_end(name, last=False)
151 elif layer == len(self._queue):
152 return
153 else:
154 raise AssertionError("Impossible to reach: earlier validation raises an error")
155
156 def clear(self):
157 self._queue.clear()
158
159 def replace(self, old, new):
160 if old not in self._queue:
161 raise ValueError("You can't replace unless one already exists, use add instead")
162 to_be_replaced = self._queue[old]
163 if to_be_replaced is old:
164 # re-insert with new name in old slot
165 self._replace_with_new_name(old, new)
166 else:
167 self._queue[old] = new
168 return to_be_replaced
169
170 def remove(self, old):
171 if old not in self._queue:
172 raise ValueError("You can only remove something that has been added")
173 del self._queue[old]
174
175 def _replace_with_new_name(self, old, new):
176 self._queue[new] = new
177 found_old = False
178 for key in list(self._queue.keys()):
179 if not found_old:
180 if key == old:
181 found_old = True
182 continue
183 elif key != new:
184 self._queue.move_to_end(key)
185 del self._queue[old]
186
187 def __iter__(self):
188 elements = self._queue.values()
189 if not isinstance(elements, Sequence):
190 elements = list(elements)
191 return iter(reversed(elements))
192
193 def __add__(self, other):
194 if not isinstance(other, NamedElementOnion):
195 raise NotImplementedError("You can only combine with another NamedElementOnion")
196 combined = self._queue.copy()
197 combined.update(other._queue)
198 return NamedElementOnion(combined.items())
199
200 def __contains__(self, element):
201 return element in self._queue
202
203 def __getitem__(self, element):
204 return self._queue[element]
205
206 def __len__(self):
207 return len(self._queue)
208
209 def __reversed__(self):
210 elements = self._queue.values()
211 if not isinstance(elements, Sequence):
212 elements = list(elements)
213 return iter(elements)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/_utils/formatters.py b/web3/_utils/formatters.py
--- a/web3/_utils/formatters.py
+++ b/web3/_utils/formatters.py
@@ -1,4 +1,4 @@
-from collections import (
+from collections.abc import (
Iterable,
Mapping,
)
diff --git a/web3/datastructures.py b/web3/datastructures.py
--- a/web3/datastructures.py
+++ b/web3/datastructures.py
@@ -1,8 +1,10 @@
from collections import (
+ OrderedDict,
+)
+from collections.abc import (
Hashable,
Mapping,
MutableMapping,
- OrderedDict,
Sequence,
)
| {"golden_diff": "diff --git a/web3/_utils/formatters.py b/web3/_utils/formatters.py\n--- a/web3/_utils/formatters.py\n+++ b/web3/_utils/formatters.py\n@@ -1,4 +1,4 @@\n-from collections import (\n+from collections.abc import (\n Iterable,\n Mapping,\n )\ndiff --git a/web3/datastructures.py b/web3/datastructures.py\n--- a/web3/datastructures.py\n+++ b/web3/datastructures.py\n@@ -1,8 +1,10 @@\n from collections import (\n+ OrderedDict,\n+)\n+from collections.abc import (\n Hashable,\n Mapping,\n MutableMapping,\n- OrderedDict,\n Sequence,\n )\n", "issue": "Remove Python `collections` Deprecation warnings\n* Python: 3.7 and below\r\n\r\n### What was wrong?\r\nPython 3.8 is changing the way imports from `collections` are being handled. The following Deprecation warning describes the issue: \r\n`DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working`\r\n\r\n### How can it be fixed?\r\nGo through the codebase and swap out any `collections` imports for `collections.abc`\n", "before_files": [{"content": "from collections import (\n Iterable,\n Mapping,\n)\n\nfrom eth_utils import (\n is_dict,\n is_list_like,\n is_string,\n to_dict,\n to_list,\n)\n\nfrom web3._utils.decorators import (\n reject_recursive_repeats,\n)\nfrom web3._utils.toolz import (\n compose,\n curry,\n dissoc,\n)\n\n\ndef hex_to_integer(value):\n return int(value, 16)\n\n\ninteger_to_hex = hex\n\n\n@curry\n@to_list\ndef apply_formatter_at_index(formatter, at_index, value):\n if at_index + 1 > len(value):\n raise IndexError(\n \"Not enough values in iterable to apply formatter. Got: {0}. \"\n \"Need: {1}\".format(len(value), at_index + 1)\n )\n for index, item in enumerate(value):\n if index == at_index:\n yield formatter(item)\n else:\n yield item\n\n\ndef apply_formatters_to_args(*formatters):\n return compose(*(\n apply_formatter_at_index(formatter, index)\n for index, formatter\n in enumerate(formatters)\n ))\n\n\n@curry\ndef apply_formatter_if(condition, formatter, value):\n if condition(value):\n return formatter(value)\n else:\n return value\n\n\n@curry\n@to_dict\ndef apply_formatters_to_dict(formatters, value):\n for key, item in value.items():\n if key in formatters:\n try:\n yield key, formatters[key](item)\n except (TypeError, ValueError) as exc:\n raise type(exc)(\"Could not format value %r as field %r\" % (item, key)) from exc\n else:\n yield key, item\n\n\n@curry\n@to_list\ndef apply_formatter_to_array(formatter, value):\n for item in value:\n yield formatter(item)\n\n\n@curry\ndef apply_one_of_formatters(formatter_condition_pairs, value):\n for formatter, condition in formatter_condition_pairs:\n if condition(value):\n return formatter(value)\n else:\n raise ValueError(\"The provided value did not satisfy any of the formatter conditions\")\n\n\ndef map_collection(func, collection):\n \"\"\"\n Apply func to each element of a collection, or value of a dictionary.\n If the value is not a collection, return it unmodified\n \"\"\"\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection\n\n\n@reject_recursive_repeats\ndef recursive_map(func, data):\n \"\"\"\n Apply func to data, and any collection items inside data (using map_collection).\n Define func so that it only applies to the type of 
value that you want it to apply to.\n \"\"\"\n def recurse(item):\n return recursive_map(func, item)\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)\n\n\ndef static_return(value):\n def inner(*args, **kwargs):\n return value\n return inner\n\n\ndef static_result(value):\n def inner(*args, **kwargs):\n return {'result': value}\n return inner\n\n\n@curry\n@to_dict\ndef apply_key_map(key_mappings, value):\n for key, item in value.items():\n if key in key_mappings:\n yield key_mappings[key], item\n else:\n yield key, item\n\n\ndef is_array_of_strings(value):\n if not is_list_like(value):\n return False\n return all((is_string(item) for item in value))\n\n\ndef is_array_of_dicts(value):\n if not is_list_like(value):\n return False\n return all((is_dict(item) for item in value))\n\n\n@curry\ndef remove_key_if(key, remove_if, input_dict):\n if key in input_dict and remove_if(input_dict):\n return dissoc(input_dict, key)\n else:\n return input_dict\n", "path": "web3/_utils/formatters.py"}, {"content": "from collections import (\n Hashable,\n Mapping,\n MutableMapping,\n OrderedDict,\n Sequence,\n)\n\nfrom eth_utils import (\n is_integer,\n)\n\nfrom web3._utils.formatters import (\n recursive_map,\n)\n\n# Hashable must be immutable:\n# \"the implementation of hashable collections requires that a key's hash value is immutable\"\n# https://docs.python.org/3/reference/datamodel.html#object.__hash__\n\n\nclass ReadableAttributeDict(Mapping):\n \"\"\"\n The read attributes for the AttributeDict types\n \"\"\"\n\n def __init__(self, dictionary, *args, **kwargs):\n self.__dict__ = dict(dictionary)\n self.__dict__.update(dict(*args, **kwargs))\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(%r)\" % self.__dict__\n\n def _repr_pretty_(self, builder, cycle):\n \"\"\"\n Custom pretty output for the IPython console\n \"\"\"\n builder.text(self.__class__.__name__ + \"(\")\n if cycle:\n builder.text(\"<cycle>\")\n else:\n builder.pretty(self.__dict__)\n builder.text(\")\")\n\n @classmethod\n def _apply_if_mapping(cls, value):\n if isinstance(value, Mapping):\n return cls(value)\n else:\n return value\n\n @classmethod\n def recursive(cls, value):\n return recursive_map(cls._apply_if_mapping, value)\n\n\nclass MutableAttributeDict(MutableMapping, ReadableAttributeDict):\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n\nclass AttributeDict(ReadableAttributeDict, Hashable):\n \"\"\"\n This provides superficial immutability, someone could hack around it\n \"\"\"\n\n def __setattr__(self, attr, val):\n if attr == '__dict__':\n super().__setattr__(attr, val)\n else:\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __delattr__(self, key):\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n def __eq__(self, other):\n if isinstance(other, Mapping):\n return self.__dict__ == dict(other)\n else:\n return False\n\n\nclass NamedElementOnion(Mapping):\n \"\"\"\n Add layers to an onion-shaped structure. 
Optionally, inject to a specific layer.\n This structure is iterable, where the outermost layer is first, and innermost is last.\n \"\"\"\n\n def __init__(self, init_elements, valid_element=callable):\n self._queue = OrderedDict()\n for element in reversed(init_elements):\n if valid_element(element):\n self.add(element)\n else:\n self.add(*element)\n\n def add(self, element, name=None):\n if name is None:\n name = element\n\n if name in self._queue:\n if name is element:\n raise ValueError(\"You can't add the same un-named instance twice\")\n else:\n raise ValueError(\"You can't add the same name again, use replace instead\")\n\n self._queue[name] = element\n\n def inject(self, element, name=None, layer=None):\n \"\"\"\n Inject a named element to an arbitrary layer in the onion.\n\n The current implementation only supports insertion at the innermost layer,\n or at the outermost layer. Note that inserting to the outermost is equivalent\n to calling :meth:`add` .\n \"\"\"\n if not is_integer(layer):\n raise TypeError(\"The layer for insertion must be an int.\")\n elif layer != 0 and layer != len(self._queue):\n raise NotImplementedError(\n \"You can only insert to the beginning or end of a %s, currently. \"\n \"You tried to insert to %d, but only 0 and %d are permitted. \" % (\n type(self),\n layer,\n len(self._queue),\n )\n )\n\n self.add(element, name=name)\n\n if layer == 0:\n if name is None:\n name = element\n self._queue.move_to_end(name, last=False)\n elif layer == len(self._queue):\n return\n else:\n raise AssertionError(\"Impossible to reach: earlier validation raises an error\")\n\n def clear(self):\n self._queue.clear()\n\n def replace(self, old, new):\n if old not in self._queue:\n raise ValueError(\"You can't replace unless one already exists, use add instead\")\n to_be_replaced = self._queue[old]\n if to_be_replaced is old:\n # re-insert with new name in old slot\n self._replace_with_new_name(old, new)\n else:\n self._queue[old] = new\n return to_be_replaced\n\n def remove(self, old):\n if old not in self._queue:\n raise ValueError(\"You can only remove something that has been added\")\n del self._queue[old]\n\n def _replace_with_new_name(self, old, new):\n self._queue[new] = new\n found_old = False\n for key in list(self._queue.keys()):\n if not found_old:\n if key == old:\n found_old = True\n continue\n elif key != new:\n self._queue.move_to_end(key)\n del self._queue[old]\n\n def __iter__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(reversed(elements))\n\n def __add__(self, other):\n if not isinstance(other, NamedElementOnion):\n raise NotImplementedError(\"You can only combine with another NamedElementOnion\")\n combined = self._queue.copy()\n combined.update(other._queue)\n return NamedElementOnion(combined.items())\n\n def __contains__(self, element):\n return element in self._queue\n\n def __getitem__(self, element):\n return self._queue[element]\n\n def __len__(self):\n return len(self._queue)\n\n def __reversed__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(elements)\n", "path": "web3/datastructures.py"}], "after_files": [{"content": "from collections.abc import (\n Iterable,\n Mapping,\n)\n\nfrom eth_utils import (\n is_dict,\n is_list_like,\n is_string,\n to_dict,\n to_list,\n)\n\nfrom web3._utils.decorators import (\n reject_recursive_repeats,\n)\nfrom web3._utils.toolz import (\n compose,\n curry,\n 
dissoc,\n)\n\n\ndef hex_to_integer(value):\n return int(value, 16)\n\n\ninteger_to_hex = hex\n\n\n@curry\n@to_list\ndef apply_formatter_at_index(formatter, at_index, value):\n if at_index + 1 > len(value):\n raise IndexError(\n \"Not enough values in iterable to apply formatter. Got: {0}. \"\n \"Need: {1}\".format(len(value), at_index + 1)\n )\n for index, item in enumerate(value):\n if index == at_index:\n yield formatter(item)\n else:\n yield item\n\n\ndef apply_formatters_to_args(*formatters):\n return compose(*(\n apply_formatter_at_index(formatter, index)\n for index, formatter\n in enumerate(formatters)\n ))\n\n\n@curry\ndef apply_formatter_if(condition, formatter, value):\n if condition(value):\n return formatter(value)\n else:\n return value\n\n\n@curry\n@to_dict\ndef apply_formatters_to_dict(formatters, value):\n for key, item in value.items():\n if key in formatters:\n try:\n yield key, formatters[key](item)\n except (TypeError, ValueError) as exc:\n raise type(exc)(\"Could not format value %r as field %r\" % (item, key)) from exc\n else:\n yield key, item\n\n\n@curry\n@to_list\ndef apply_formatter_to_array(formatter, value):\n for item in value:\n yield formatter(item)\n\n\n@curry\ndef apply_one_of_formatters(formatter_condition_pairs, value):\n for formatter, condition in formatter_condition_pairs:\n if condition(value):\n return formatter(value)\n else:\n raise ValueError(\"The provided value did not satisfy any of the formatter conditions\")\n\n\ndef map_collection(func, collection):\n \"\"\"\n Apply func to each element of a collection, or value of a dictionary.\n If the value is not a collection, return it unmodified\n \"\"\"\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection\n\n\n@reject_recursive_repeats\ndef recursive_map(func, data):\n \"\"\"\n Apply func to data, and any collection items inside data (using map_collection).\n Define func so that it only applies to the type of value that you want it to apply to.\n \"\"\"\n def recurse(item):\n return recursive_map(func, item)\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)\n\n\ndef static_return(value):\n def inner(*args, **kwargs):\n return value\n return inner\n\n\ndef static_result(value):\n def inner(*args, **kwargs):\n return {'result': value}\n return inner\n\n\n@curry\n@to_dict\ndef apply_key_map(key_mappings, value):\n for key, item in value.items():\n if key in key_mappings:\n yield key_mappings[key], item\n else:\n yield key, item\n\n\ndef is_array_of_strings(value):\n if not is_list_like(value):\n return False\n return all((is_string(item) for item in value))\n\n\ndef is_array_of_dicts(value):\n if not is_list_like(value):\n return False\n return all((is_dict(item) for item in value))\n\n\n@curry\ndef remove_key_if(key, remove_if, input_dict):\n if key in input_dict and remove_if(input_dict):\n return dissoc(input_dict, key)\n else:\n return input_dict\n", "path": "web3/_utils/formatters.py"}, {"content": "from collections import (\n OrderedDict,\n)\nfrom collections.abc import (\n Hashable,\n Mapping,\n MutableMapping,\n Sequence,\n)\n\nfrom eth_utils import (\n is_integer,\n)\n\nfrom web3._utils.formatters import (\n recursive_map,\n)\n\n# Hashable must be immutable:\n# \"the implementation of hashable collections requires that a key's hash 
value is immutable\"\n# https://docs.python.org/3/reference/datamodel.html#object.__hash__\n\n\nclass ReadableAttributeDict(Mapping):\n \"\"\"\n The read attributes for the AttributeDict types\n \"\"\"\n\n def __init__(self, dictionary, *args, **kwargs):\n self.__dict__ = dict(dictionary)\n self.__dict__.update(dict(*args, **kwargs))\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(%r)\" % self.__dict__\n\n def _repr_pretty_(self, builder, cycle):\n \"\"\"\n Custom pretty output for the IPython console\n \"\"\"\n builder.text(self.__class__.__name__ + \"(\")\n if cycle:\n builder.text(\"<cycle>\")\n else:\n builder.pretty(self.__dict__)\n builder.text(\")\")\n\n @classmethod\n def _apply_if_mapping(cls, value):\n if isinstance(value, Mapping):\n return cls(value)\n else:\n return value\n\n @classmethod\n def recursive(cls, value):\n return recursive_map(cls._apply_if_mapping, value)\n\n\nclass MutableAttributeDict(MutableMapping, ReadableAttributeDict):\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n\nclass AttributeDict(ReadableAttributeDict, Hashable):\n \"\"\"\n This provides superficial immutability, someone could hack around it\n \"\"\"\n\n def __setattr__(self, attr, val):\n if attr == '__dict__':\n super().__setattr__(attr, val)\n else:\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __delattr__(self, key):\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n def __eq__(self, other):\n if isinstance(other, Mapping):\n return self.__dict__ == dict(other)\n else:\n return False\n\n\nclass NamedElementOnion(Mapping):\n \"\"\"\n Add layers to an onion-shaped structure. Optionally, inject to a specific layer.\n This structure is iterable, where the outermost layer is first, and innermost is last.\n \"\"\"\n\n def __init__(self, init_elements, valid_element=callable):\n self._queue = OrderedDict()\n for element in reversed(init_elements):\n if valid_element(element):\n self.add(element)\n else:\n self.add(*element)\n\n def add(self, element, name=None):\n if name is None:\n name = element\n\n if name in self._queue:\n if name is element:\n raise ValueError(\"You can't add the same un-named instance twice\")\n else:\n raise ValueError(\"You can't add the same name again, use replace instead\")\n\n self._queue[name] = element\n\n def inject(self, element, name=None, layer=None):\n \"\"\"\n Inject a named element to an arbitrary layer in the onion.\n\n The current implementation only supports insertion at the innermost layer,\n or at the outermost layer. Note that inserting to the outermost is equivalent\n to calling :meth:`add` .\n \"\"\"\n if not is_integer(layer):\n raise TypeError(\"The layer for insertion must be an int.\")\n elif layer != 0 and layer != len(self._queue):\n raise NotImplementedError(\n \"You can only insert to the beginning or end of a %s, currently. \"\n \"You tried to insert to %d, but only 0 and %d are permitted. 
\" % (\n type(self),\n layer,\n len(self._queue),\n )\n )\n\n self.add(element, name=name)\n\n if layer == 0:\n if name is None:\n name = element\n self._queue.move_to_end(name, last=False)\n elif layer == len(self._queue):\n return\n else:\n raise AssertionError(\"Impossible to reach: earlier validation raises an error\")\n\n def clear(self):\n self._queue.clear()\n\n def replace(self, old, new):\n if old not in self._queue:\n raise ValueError(\"You can't replace unless one already exists, use add instead\")\n to_be_replaced = self._queue[old]\n if to_be_replaced is old:\n # re-insert with new name in old slot\n self._replace_with_new_name(old, new)\n else:\n self._queue[old] = new\n return to_be_replaced\n\n def remove(self, old):\n if old not in self._queue:\n raise ValueError(\"You can only remove something that has been added\")\n del self._queue[old]\n\n def _replace_with_new_name(self, old, new):\n self._queue[new] = new\n found_old = False\n for key in list(self._queue.keys()):\n if not found_old:\n if key == old:\n found_old = True\n continue\n elif key != new:\n self._queue.move_to_end(key)\n del self._queue[old]\n\n def __iter__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(reversed(elements))\n\n def __add__(self, other):\n if not isinstance(other, NamedElementOnion):\n raise NotImplementedError(\"You can only combine with another NamedElementOnion\")\n combined = self._queue.copy()\n combined.update(other._queue)\n return NamedElementOnion(combined.items())\n\n def __contains__(self, element):\n return element in self._queue\n\n def __getitem__(self, element):\n return self._queue[element]\n\n def __len__(self):\n return len(self._queue)\n\n def __reversed__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(elements)\n", "path": "web3/datastructures.py"}]} | 3,568 | 147 |
gh_patches_debug_37508 | rasdani/github-patches | git_diff | optuna__optuna-4973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TensorBoard integration fails if None is suggested
### Expected behavior
`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.
### Environment
- Optuna version: 3.2.0
- Python version: 3.9.13
- OS: Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31
### Error messages, stack traces, or logs
```shell
File /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)
505 raise ValueError("Empty domain with no dtype specified")
506 if dtype not in (int, float, bool, str):
--> 507 raise ValueError("Unknown dtype: %r" % (dtype,))
508 self._dtype = dtype
509 for value in self._values:
ValueError: Unknown dtype: <class 'NoneType'>
```
### Steps to reproduce
1. If I run this
```python
import optuna
from optuna.integration.tensorboard import TensorBoardCallback
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
X, y = make_classification(n_samples=1000, n_features=4,
n_informative=2, n_redundant=0,
random_state=0, shuffle=False)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)
def objective(trial):
params = {
'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),
}
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_val_pred = clf.predict(X_val)
f1 = f1_score(y_val, y_val_pred)
return f1
tensorboard_callback = TensorBoardCallback("logs/", metric_name="f1")
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])
```
It raises the error shown above.

2. If I don't have `None` in the suggestion or don't use `TensorBoardCallback`, then it works fine.
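The root cause can be reproduced in isolation with the same hparams API the callback uses (a minimal sketch; the import matches the one in `optuna/integration/tensorboard.py`):

```python
from tensorboard.plugins.hparams import api as hp

# hp.Discrete infers its dtype from the supplied values, and NoneType is not
# one of (int, float, bool, str), so this raises the ValueError quoted above.
hp.Discrete([None, 2, 5, 10, 20])
```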
### Additional context (optional)
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/integration/tensorboard.py`
Content:
```
1 import os
2 from typing import Dict
3
4 import optuna
5 from optuna._experimental import experimental_class
6 from optuna._imports import try_import
7
8
9 with try_import() as _imports:
10 from tensorboard.plugins.hparams import api as hp
11 import tensorflow as tf
12
13
14 @experimental_class("2.0.0")
15 class TensorBoardCallback:
16 """Callback to track Optuna trials with TensorBoard.
17
18 This callback adds relevant information that is tracked by Optuna to TensorBoard.
19
20 See `the example <https://github.com/optuna/optuna-examples/blob/main/
21 tensorboard/tensorboard_simple.py>`_.
22
23 Args:
24 dirname:
25 Directory to store TensorBoard logs.
26 metric_name:
27 Name of the metric. Since the metric itself is just a number,
28 `metric_name` can be used to give it a name. So you know later
29 if it was roc-auc or accuracy.
30
31 """
32
33 def __init__(self, dirname: str, metric_name: str) -> None:
34 _imports.check()
35 self._dirname = dirname
36 self._metric_name = metric_name
37 self._hp_params: Dict[str, hp.HParam] = {}
38
39 def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
40 if len(self._hp_params) == 0:
41 self._initialization(study)
42 if trial.state != optuna.trial.TrialState.COMPLETE:
43 return
44 trial_value = trial.value if trial.value is not None else float("nan")
45 hparams = {}
46 for param_name, param_value in trial.params.items():
47 if param_name not in self._hp_params:
48 self._add_distributions(trial.distributions)
49 hparams[self._hp_params[param_name]] = param_value
50 run_name = "trial-%d" % trial.number
51 run_dir = os.path.join(self._dirname, run_name)
52 with tf.summary.create_file_writer(run_dir).as_default():
53 hp.hparams(hparams, trial_id=run_name) # record the values used in this trial
54 tf.summary.scalar(self._metric_name, trial_value, step=trial.number)
55
56 def _add_distributions(
57 self, distributions: Dict[str, optuna.distributions.BaseDistribution]
58 ) -> None:
59 supported_distributions = (
60 optuna.distributions.CategoricalDistribution,
61 optuna.distributions.FloatDistribution,
62 optuna.distributions.IntDistribution,
63 )
64
65 for param_name, param_distribution in distributions.items():
66 if isinstance(param_distribution, optuna.distributions.FloatDistribution):
67 self._hp_params[param_name] = hp.HParam(
68 param_name,
69 hp.RealInterval(float(param_distribution.low), float(param_distribution.high)),
70 )
71 elif isinstance(param_distribution, optuna.distributions.IntDistribution):
72 self._hp_params[param_name] = hp.HParam(
73 param_name,
74 hp.IntInterval(param_distribution.low, param_distribution.high),
75 )
76 elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):
77 self._hp_params[param_name] = hp.HParam(
78 param_name,
79 hp.Discrete(param_distribution.choices),
80 )
81 else:
82 distribution_list = [
83 distribution.__name__ for distribution in supported_distributions
84 ]
85 raise NotImplementedError(
86 "The distribution {} is not implemented. "
87 "The parameter distribution should be one of the {}".format(
88 param_distribution, distribution_list
89 )
90 )
91
92 def _initialization(self, study: optuna.Study) -> None:
93 completed_trials = [
94 trial
95 for trial in study.get_trials(deepcopy=False)
96 if trial.state == optuna.trial.TrialState.COMPLETE
97 ]
98 for trial in completed_trials:
99 self._add_distributions(trial.distributions)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py
--- a/optuna/integration/tensorboard.py
+++ b/optuna/integration/tensorboard.py
@@ -4,12 +4,15 @@
import optuna
from optuna._experimental import experimental_class
from optuna._imports import try_import
+from optuna.logging import get_logger
with try_import() as _imports:
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
+_logger = get_logger(__name__)
+
@experimental_class("2.0.0")
class TensorBoardCallback:
@@ -46,7 +49,11 @@
for param_name, param_value in trial.params.items():
if param_name not in self._hp_params:
self._add_distributions(trial.distributions)
- hparams[self._hp_params[param_name]] = param_value
+ param = self._hp_params[param_name]
+ if isinstance(param.domain, hp.Discrete):
+ hparams[param] = param.domain.dtype(param_value)
+ else:
+ hparams[param] = param_value
run_name = "trial-%d" % trial.number
run_dir = os.path.join(self._dirname, run_name)
with tf.summary.create_file_writer(run_dir).as_default():
@@ -74,9 +81,24 @@
hp.IntInterval(param_distribution.low, param_distribution.high),
)
elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):
+ choices = param_distribution.choices
+ dtype = type(choices[0])
+ if any(not isinstance(choice, dtype) for choice in choices):
+ _logger.warning(
+ "Choices contains mixed types, which is not supported by TensorBoard. "
+ "Converting all choices to strings."
+ )
+ choices = tuple(map(str, choices))
+ elif dtype not in (int, float, bool, str):
+ _logger.warning(
+ f"Choices are of type {dtype}, which is not supported by TensorBoard. "
+ "Converting all choices to strings."
+ )
+ choices = tuple(map(str, choices))
+
self._hp_params[param_name] = hp.HParam(
param_name,
- hp.Discrete(param_distribution.choices),
+ hp.Discrete(choices),
)
else:
distribution_list = [
| {"golden_diff": "diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py\n--- a/optuna/integration/tensorboard.py\n+++ b/optuna/integration/tensorboard.py\n@@ -4,12 +4,15 @@\n import optuna\n from optuna._experimental import experimental_class\n from optuna._imports import try_import\n+from optuna.logging import get_logger\n \n \n with try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n \n+_logger = get_logger(__name__)\n+\n \n @experimental_class(\"2.0.0\")\n class TensorBoardCallback:\n@@ -46,7 +49,11 @@\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n- hparams[self._hp_params[param_name]] = param_value\n+ param = self._hp_params[param_name]\n+ if isinstance(param.domain, hp.Discrete):\n+ hparams[param] = param.domain.dtype(param_value)\n+ else:\n+ hparams[param] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n@@ -74,9 +81,24 @@\n hp.IntInterval(param_distribution.low, param_distribution.high),\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n+ choices = param_distribution.choices\n+ dtype = type(choices[0])\n+ if any(not isinstance(choice, dtype) for choice in choices):\n+ _logger.warning(\n+ \"Choices contains mixed types, which is not supported by TensorBoard. \"\n+ \"Converting all choices to strings.\"\n+ )\n+ choices = tuple(map(str, choices))\n+ elif dtype not in (int, float, bool, str):\n+ _logger.warning(\n+ f\"Choices are of type {dtype}, which is not supported by TensorBoard. \"\n+ \"Converting all choices to strings.\"\n+ )\n+ choices = tuple(map(str, choices))\n+\n self._hp_params[param_name] = hp.HParam(\n param_name,\n- hp.Discrete(param_distribution.choices),\n+ hp.Discrete(choices),\n )\n else:\n distribution_list = [\n", "issue": "TensorBoard integration fails if None is suggested\n### Expected behavior\n\n`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.\n\n### Environment\n\n- Optuna version:3.2.0\r\n- Python version:3.9.13\r\n- OS:Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31\n\n### Error messages, stack traces, or logs\n\n```shell\nFile /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)\r\n 505 raise ValueError(\"Empty domain with no dtype specified\")\r\n 506 if dtype not in (int, float, bool, str):\r\n--> 507 raise ValueError(\"Unknown dtype: %r\" % (dtype,))\r\n 508 self._dtype = dtype\r\n 509 for value in self._values:\r\n\r\nValueError: Unknown dtype: <class 'NoneType'>\r\n```\n```\n\n\n### Steps to reproduce\n\n1. 
If I run this\r\n```python\r\nimport optuna\r\nfrom optuna.integration.tensorboard import TensorBoardCallback\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\n\r\nX, y = make_classification(n_samples=1000, n_features=4,\r\n n_informative=2, n_redundant=0,\r\n random_state=0, shuffle=False)\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)\r\n\r\ndef objective(trial):\r\n params = {\r\n 'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),\r\n }\r\n clf = RandomForestClassifier()\r\n clf.fit(X_train, y_train)\r\n\r\n y_val_pred = clf.predict(X_val)\r\n f1 = f1_score(y_val, y_val_pred)\r\n return f1\r\n\r\ntensorboard_callback = TensorBoardCallback(\"logs/\", metric_name=\"f1\")\r\nstudy = optuna.create_study(direction=\"minimize\")\r\n\r\nstudy.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])\r\n```\r\n\r\nIt would return an error above.\r\n\r\n2. If I don't have `None` in the suggestion or not use `TensorBoardCallback` then it would work fine.\n\n### Additional context (optional)\n\n_No response_\nTensorBoard integration fails if None is suggested\n### Expected behavior\n\n`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.\n\n### Environment\n\n- Optuna version:3.2.0\r\n- Python version:3.9.13\r\n- OS:Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31\n\n### Error messages, stack traces, or logs\n\n```shell\nFile /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)\r\n 505 raise ValueError(\"Empty domain with no dtype specified\")\r\n 506 if dtype not in (int, float, bool, str):\r\n--> 507 raise ValueError(\"Unknown dtype: %r\" % (dtype,))\r\n 508 self._dtype = dtype\r\n 509 for value in self._values:\r\n\r\nValueError: Unknown dtype: <class 'NoneType'>\r\n```\n```\n\n\n### Steps to reproduce\n\n1. If I run this\r\n```python\r\nimport optuna\r\nfrom optuna.integration.tensorboard import TensorBoardCallback\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\n\r\nX, y = make_classification(n_samples=1000, n_features=4,\r\n n_informative=2, n_redundant=0,\r\n random_state=0, shuffle=False)\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)\r\n\r\ndef objective(trial):\r\n params = {\r\n 'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),\r\n }\r\n clf = RandomForestClassifier()\r\n clf.fit(X_train, y_train)\r\n\r\n y_val_pred = clf.predict(X_val)\r\n f1 = f1_score(y_val, y_val_pred)\r\n return f1\r\n\r\ntensorboard_callback = TensorBoardCallback(\"logs/\", metric_name=\"f1\")\r\nstudy = optuna.create_study(direction=\"minimize\")\r\n\r\nstudy.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])\r\n```\r\n\r\nIt would return an error above.\r\n\r\n2. 
If I don't have `None` in the suggestion or not use `TensorBoardCallback` then it would work fine.\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import os\nfrom typing import Dict\n\nimport optuna\nfrom optuna._experimental import experimental_class\nfrom optuna._imports import try_import\n\n\nwith try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n\n\n@experimental_class(\"2.0.0\")\nclass TensorBoardCallback:\n \"\"\"Callback to track Optuna trials with TensorBoard.\n\n This callback adds relevant information that is tracked by Optuna to TensorBoard.\n\n See `the example <https://github.com/optuna/optuna-examples/blob/main/\n tensorboard/tensorboard_simple.py>`_.\n\n Args:\n dirname:\n Directory to store TensorBoard logs.\n metric_name:\n Name of the metric. Since the metric itself is just a number,\n `metric_name` can be used to give it a name. So you know later\n if it was roc-auc or accuracy.\n\n \"\"\"\n\n def __init__(self, dirname: str, metric_name: str) -> None:\n _imports.check()\n self._dirname = dirname\n self._metric_name = metric_name\n self._hp_params: Dict[str, hp.HParam] = {}\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n if len(self._hp_params) == 0:\n self._initialization(study)\n if trial.state != optuna.trial.TrialState.COMPLETE:\n return\n trial_value = trial.value if trial.value is not None else float(\"nan\")\n hparams = {}\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n hparams[self._hp_params[param_name]] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams, trial_id=run_name) # record the values used in this trial\n tf.summary.scalar(self._metric_name, trial_value, step=trial.number)\n\n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n ) -> None:\n supported_distributions = (\n optuna.distributions.CategoricalDistribution,\n optuna.distributions.FloatDistribution,\n optuna.distributions.IntDistribution,\n )\n\n for param_name, param_distribution in distributions.items():\n if isinstance(param_distribution, optuna.distributions.FloatDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.RealInterval(float(param_distribution.low), float(param_distribution.high)),\n )\n elif isinstance(param_distribution, optuna.distributions.IntDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.IntInterval(param_distribution.low, param_distribution.high),\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.Discrete(param_distribution.choices),\n )\n else:\n distribution_list = [\n distribution.__name__ for distribution in supported_distributions\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. 
\"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _initialization(self, study: optuna.Study) -> None:\n completed_trials = [\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == optuna.trial.TrialState.COMPLETE\n ]\n for trial in completed_trials:\n self._add_distributions(trial.distributions)\n", "path": "optuna/integration/tensorboard.py"}], "after_files": [{"content": "import os\nfrom typing import Dict\n\nimport optuna\nfrom optuna._experimental import experimental_class\nfrom optuna._imports import try_import\nfrom optuna.logging import get_logger\n\n\nwith try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n\n_logger = get_logger(__name__)\n\n\n@experimental_class(\"2.0.0\")\nclass TensorBoardCallback:\n \"\"\"Callback to track Optuna trials with TensorBoard.\n\n This callback adds relevant information that is tracked by Optuna to TensorBoard.\n\n See `the example <https://github.com/optuna/optuna-examples/blob/main/\n tensorboard/tensorboard_simple.py>`_.\n\n Args:\n dirname:\n Directory to store TensorBoard logs.\n metric_name:\n Name of the metric. Since the metric itself is just a number,\n `metric_name` can be used to give it a name. So you know later\n if it was roc-auc or accuracy.\n\n \"\"\"\n\n def __init__(self, dirname: str, metric_name: str) -> None:\n _imports.check()\n self._dirname = dirname\n self._metric_name = metric_name\n self._hp_params: Dict[str, hp.HParam] = {}\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n if len(self._hp_params) == 0:\n self._initialization(study)\n if trial.state != optuna.trial.TrialState.COMPLETE:\n return\n trial_value = trial.value if trial.value is not None else float(\"nan\")\n hparams = {}\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n param = self._hp_params[param_name]\n if isinstance(param.domain, hp.Discrete):\n hparams[param] = param.domain.dtype(param_value)\n else:\n hparams[param] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams, trial_id=run_name) # record the values used in this trial\n tf.summary.scalar(self._metric_name, trial_value, step=trial.number)\n\n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n ) -> None:\n supported_distributions = (\n optuna.distributions.CategoricalDistribution,\n optuna.distributions.FloatDistribution,\n optuna.distributions.IntDistribution,\n )\n\n for param_name, param_distribution in distributions.items():\n if isinstance(param_distribution, optuna.distributions.FloatDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.RealInterval(float(param_distribution.low), float(param_distribution.high)),\n )\n elif isinstance(param_distribution, optuna.distributions.IntDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.IntInterval(param_distribution.low, param_distribution.high),\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n choices = param_distribution.choices\n dtype = type(choices[0])\n if any(not isinstance(choice, dtype) for choice in choices):\n _logger.warning(\n \"Choices contains mixed types, which is not supported by 
TensorBoard. \"\n \"Converting all choices to strings.\"\n )\n choices = tuple(map(str, choices))\n elif dtype not in (int, float, bool, str):\n _logger.warning(\n f\"Choices are of type {dtype}, which is not supported by TensorBoard. \"\n \"Converting all choices to strings.\"\n )\n choices = tuple(map(str, choices))\n\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.Discrete(choices),\n )\n else:\n distribution_list = [\n distribution.__name__ for distribution in supported_distributions\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. \"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _initialization(self, study: optuna.Study) -> None:\n completed_trials = [\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == optuna.trial.TrialState.COMPLETE\n ]\n for trial in completed_trials:\n self._add_distributions(trial.distributions)\n", "path": "optuna/integration/tensorboard.py"}]} | 2,394 | 517 |
gh_patches_debug_11727 | rasdani/github-patches | git_diff | pypa__setuptools-1607 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Preserving read-only flags on package data causing access denied when building
I'm working with a read-only source directory on Windows and running into access denied errors when calling `python setup.py bdist_wheel`. I noticed that it was only files included in package_data that were causing this, which helped me track it down to:
[setuptools/setuptools/command/build_py.py](
https://github.com/pypa/setuptools/blob/89155abb4222cf5a9dc81120e5c71e26b5af68f9/setuptools/command/build_py.py#L123)
`outf, copied = self.copy_file(srcfile, target)`
which is slightly different from its distutils version:
[cpython/Lib/distutils/command/build_py.py](
https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L141)
`self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False)`
So it looks like it comes down to this preserve_mode flag.
I don't know if there is a good reason to preserve things like read-only flags? There's a [comment](https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L74) about preserving mode further up in the distutils code, which seems to suggest not preserving is the way to go.
--- END ISSUE ---
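For illustration only (this sketch is not part of the original report), the behaviour in question can be reproduced with `distutils.file_util.copy_file` directly, assuming a Python version that still ships distutils: leaving `preserve_mode` at its default copies the source's read-only bit onto the build output, so a later rebuild that tries to overwrite that output fails, whereas `preserve_mode=False` gives the copy the default, writable mode.

```python
# Illustrative sketch (not from the repository): how preserve_mode affects the
# mode bits of a copied package-data file when the source tree is read-only.
import os
import stat
import tempfile
from distutils.file_util import copy_file

tmp = tempfile.mkdtemp()
src = os.path.join(tmp, "data.txt")
with open(src, "w") as fh:
    fh.write("package data\n")
os.chmod(src, stat.S_IREAD)  # simulate a read-only source checkout

preserved = os.path.join(tmp, "preserved.txt")
copy_file(src, preserved)                     # default preserve_mode=1: copy keeps the read-only bit
relaxed = os.path.join(tmp, "relaxed.txt")
copy_file(src, relaxed, preserve_mode=False)  # copy gets the process's default (writable) mode

print(oct(os.stat(preserved).st_mode & 0o777))  # read-only, e.g. 0o400 on POSIX
print(oct(os.stat(relaxed).st_mode & 0o777))    # writable, e.g. 0o644
```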
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/command/build_py.py`
Content:
```
1 from glob import glob
2 from distutils.util import convert_path
3 import distutils.command.build_py as orig
4 import os
5 import fnmatch
6 import textwrap
7 import io
8 import distutils.errors
9 import itertools
10
11 from setuptools.extern import six
12 from setuptools.extern.six.moves import map, filter, filterfalse
13
14 try:
15 from setuptools.lib2to3_ex import Mixin2to3
16 except ImportError:
17
18 class Mixin2to3:
19 def run_2to3(self, files, doctests=True):
20 "do nothing"
21
22
23 class build_py(orig.build_py, Mixin2to3):
24 """Enhanced 'build_py' command that includes data files with packages
25
26 The data files are specified via a 'package_data' argument to 'setup()'.
27 See 'setuptools.dist.Distribution' for more details.
28
29 Also, this version of the 'build_py' command allows you to specify both
30 'py_modules' and 'packages' in the same setup operation.
31 """
32
33 def finalize_options(self):
34 orig.build_py.finalize_options(self)
35 self.package_data = self.distribution.package_data
36 self.exclude_package_data = (self.distribution.exclude_package_data or
37 {})
38 if 'data_files' in self.__dict__:
39 del self.__dict__['data_files']
40 self.__updated_files = []
41 self.__doctests_2to3 = []
42
43 def run(self):
44 """Build modules, packages, and copy data files to build directory"""
45 if not self.py_modules and not self.packages:
46 return
47
48 if self.py_modules:
49 self.build_modules()
50
51 if self.packages:
52 self.build_packages()
53 self.build_package_data()
54
55 self.run_2to3(self.__updated_files, False)
56 self.run_2to3(self.__updated_files, True)
57 self.run_2to3(self.__doctests_2to3, True)
58
59 # Only compile actual .py files, using our base class' idea of what our
60 # output files are.
61 self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
62
63 def __getattr__(self, attr):
64 "lazily compute data files"
65 if attr == 'data_files':
66 self.data_files = self._get_data_files()
67 return self.data_files
68 return orig.build_py.__getattr__(self, attr)
69
70 def build_module(self, module, module_file, package):
71 if six.PY2 and isinstance(package, six.string_types):
72 # avoid errors on Python 2 when unicode is passed (#190)
73 package = package.split('.')
74 outfile, copied = orig.build_py.build_module(self, module, module_file,
75 package)
76 if copied:
77 self.__updated_files.append(outfile)
78 return outfile, copied
79
80 def _get_data_files(self):
81 """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
82 self.analyze_manifest()
83 return list(map(self._get_pkg_data_files, self.packages or ()))
84
85 def _get_pkg_data_files(self, package):
86 # Locate package source directory
87 src_dir = self.get_package_dir(package)
88
89 # Compute package build directory
90 build_dir = os.path.join(*([self.build_lib] + package.split('.')))
91
92 # Strip directory from globbed filenames
93 filenames = [
94 os.path.relpath(file, src_dir)
95 for file in self.find_data_files(package, src_dir)
96 ]
97 return package, src_dir, build_dir, filenames
98
99 def find_data_files(self, package, src_dir):
100 """Return filenames for package's data files in 'src_dir'"""
101 patterns = self._get_platform_patterns(
102 self.package_data,
103 package,
104 src_dir,
105 )
106 globs_expanded = map(glob, patterns)
107 # flatten the expanded globs into an iterable of matches
108 globs_matches = itertools.chain.from_iterable(globs_expanded)
109 glob_files = filter(os.path.isfile, globs_matches)
110 files = itertools.chain(
111 self.manifest_files.get(package, []),
112 glob_files,
113 )
114 return self.exclude_data_files(package, src_dir, files)
115
116 def build_package_data(self):
117 """Copy data files into build directory"""
118 for package, src_dir, build_dir, filenames in self.data_files:
119 for filename in filenames:
120 target = os.path.join(build_dir, filename)
121 self.mkpath(os.path.dirname(target))
122 srcfile = os.path.join(src_dir, filename)
123 outf, copied = self.copy_file(srcfile, target)
124 srcfile = os.path.abspath(srcfile)
125 if (copied and
126 srcfile in self.distribution.convert_2to3_doctests):
127 self.__doctests_2to3.append(outf)
128
129 def analyze_manifest(self):
130 self.manifest_files = mf = {}
131 if not self.distribution.include_package_data:
132 return
133 src_dirs = {}
134 for package in self.packages or ():
135 # Locate package source directory
136 src_dirs[assert_relative(self.get_package_dir(package))] = package
137
138 self.run_command('egg_info')
139 ei_cmd = self.get_finalized_command('egg_info')
140 for path in ei_cmd.filelist.files:
141 d, f = os.path.split(assert_relative(path))
142 prev = None
143 oldf = f
144 while d and d != prev and d not in src_dirs:
145 prev = d
146 d, df = os.path.split(d)
147 f = os.path.join(df, f)
148 if d in src_dirs:
149 if path.endswith('.py') and f == oldf:
150 continue # it's a module, not data
151 mf.setdefault(src_dirs[d], []).append(path)
152
153 def get_data_files(self):
154 pass # Lazily compute data files in _get_data_files() function.
155
156 def check_package(self, package, package_dir):
157 """Check namespace packages' __init__ for declare_namespace"""
158 try:
159 return self.packages_checked[package]
160 except KeyError:
161 pass
162
163 init_py = orig.build_py.check_package(self, package, package_dir)
164 self.packages_checked[package] = init_py
165
166 if not init_py or not self.distribution.namespace_packages:
167 return init_py
168
169 for pkg in self.distribution.namespace_packages:
170 if pkg == package or pkg.startswith(package + '.'):
171 break
172 else:
173 return init_py
174
175 with io.open(init_py, 'rb') as f:
176 contents = f.read()
177 if b'declare_namespace' not in contents:
178 raise distutils.errors.DistutilsError(
179 "Namespace package problem: %s is a namespace package, but "
180 "its\n__init__.py does not call declare_namespace()! Please "
181 'fix it.\n(See the setuptools manual under '
182 '"Namespace Packages" for details.)\n"' % (package,)
183 )
184 return init_py
185
186 def initialize_options(self):
187 self.packages_checked = {}
188 orig.build_py.initialize_options(self)
189
190 def get_package_dir(self, package):
191 res = orig.build_py.get_package_dir(self, package)
192 if self.distribution.src_root is not None:
193 return os.path.join(self.distribution.src_root, res)
194 return res
195
196 def exclude_data_files(self, package, src_dir, files):
197 """Filter filenames for package's data files in 'src_dir'"""
198 files = list(files)
199 patterns = self._get_platform_patterns(
200 self.exclude_package_data,
201 package,
202 src_dir,
203 )
204 match_groups = (
205 fnmatch.filter(files, pattern)
206 for pattern in patterns
207 )
208 # flatten the groups of matches into an iterable of matches
209 matches = itertools.chain.from_iterable(match_groups)
210 bad = set(matches)
211 keepers = (
212 fn
213 for fn in files
214 if fn not in bad
215 )
216 # ditch dupes
217 return list(_unique_everseen(keepers))
218
219 @staticmethod
220 def _get_platform_patterns(spec, package, src_dir):
221 """
222 yield platform-specific path patterns (suitable for glob
223 or fn_match) from a glob-based spec (such as
224 self.package_data or self.exclude_package_data)
225 matching package in src_dir.
226 """
227 raw_patterns = itertools.chain(
228 spec.get('', []),
229 spec.get(package, []),
230 )
231 return (
232 # Each pattern has to be converted to a platform-specific path
233 os.path.join(src_dir, convert_path(pattern))
234 for pattern in raw_patterns
235 )
236
237
238 # from Python docs
239 def _unique_everseen(iterable, key=None):
240 "List unique elements, preserving order. Remember all elements ever seen."
241 # unique_everseen('AAAABBBCCDAABBB') --> A B C D
242 # unique_everseen('ABBCcAD', str.lower) --> A B C D
243 seen = set()
244 seen_add = seen.add
245 if key is None:
246 for element in filterfalse(seen.__contains__, iterable):
247 seen_add(element)
248 yield element
249 else:
250 for element in iterable:
251 k = key(element)
252 if k not in seen:
253 seen_add(k)
254 yield element
255
256
257 def assert_relative(path):
258 if not os.path.isabs(path):
259 return path
260 from distutils.errors import DistutilsSetupError
261
262 msg = textwrap.dedent("""
263 Error: setup script specifies an absolute path:
264
265 %s
266
267 setup() arguments must *always* be /-separated paths relative to the
268 setup.py directory, *never* absolute paths.
269 """).lstrip() % path
270 raise DistutilsSetupError(msg)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py
--- a/setuptools/command/build_py.py
+++ b/setuptools/command/build_py.py
@@ -120,7 +120,7 @@
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
- outf, copied = self.copy_file(srcfile, target)
+ outf, copied = self.copy_file(srcfile, target, preserve_mode=False)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
| {"golden_diff": "diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py\n--- a/setuptools/command/build_py.py\n+++ b/setuptools/command/build_py.py\n@@ -120,7 +120,7 @@\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n- outf, copied = self.copy_file(srcfile, target)\n+ outf, copied = self.copy_file(srcfile, target, preserve_mode=False)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n", "issue": "Preserving read-only flags on package data causing access denied when building\nI'm working with a read-only source directory on Windows and running into access denied errors when calling `python setup.py bdist_wheel`. I noticed that it was only files included in package_data that were causing this, which helped me track it down to:\r\n\r\n[setuptools/setuptools/command/build_py.py](\r\nhttps://github.com/pypa/setuptools/blob/89155abb4222cf5a9dc81120e5c71e26b5af68f9/setuptools/command/build_py.py#L123)\r\n`outf, copied = self.copy_file(srcfile, target)`\r\n\r\nwhich is slightly different from its distutils version:\r\n\r\n[cpython/Lib/distutils/command/build_py.py](\r\nhttps://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L141)\r\n`self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False)`\r\n\r\nSo it looks like it comes down to this preserve_mode flag.\r\n\r\nI don't know if there is a good reason to preserve things like read-only flags? There's a [comment](https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L74) about preserving mode further up in the distutils code, which seems to suggest not preserving is the way to go.\n", "before_files": [{"content": "from glob import glob\nfrom distutils.util import convert_path\nimport distutils.command.build_py as orig\nimport os\nimport fnmatch\nimport textwrap\nimport io\nimport distutils.errors\nimport itertools\n\nfrom setuptools.extern import six\nfrom setuptools.extern.six.moves import map, filter, filterfalse\n\ntry:\n from setuptools.lib2to3_ex import Mixin2to3\nexcept ImportError:\n\n class Mixin2to3:\n def run_2to3(self, files, doctests=True):\n \"do nothing\"\n\n\nclass build_py(orig.build_py, Mixin2to3):\n \"\"\"Enhanced 'build_py' command that includes data files with packages\n\n The data files are specified via a 'package_data' argument to 'setup()'.\n See 'setuptools.dist.Distribution' for more details.\n\n Also, this version of the 'build_py' command allows you to specify both\n 'py_modules' and 'packages' in the same setup operation.\n \"\"\"\n\n def finalize_options(self):\n orig.build_py.finalize_options(self)\n self.package_data = self.distribution.package_data\n self.exclude_package_data = (self.distribution.exclude_package_data or\n {})\n if 'data_files' in self.__dict__:\n del self.__dict__['data_files']\n self.__updated_files = []\n self.__doctests_2to3 = []\n\n def run(self):\n \"\"\"Build modules, packages, and copy data files to build directory\"\"\"\n if not self.py_modules and not self.packages:\n return\n\n if self.py_modules:\n self.build_modules()\n\n if self.packages:\n self.build_packages()\n self.build_package_data()\n\n self.run_2to3(self.__updated_files, False)\n self.run_2to3(self.__updated_files, True)\n self.run_2to3(self.__doctests_2to3, True)\n\n # Only compile actual .py files, using our base class' idea of 
what our\n # output files are.\n self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))\n\n def __getattr__(self, attr):\n \"lazily compute data files\"\n if attr == 'data_files':\n self.data_files = self._get_data_files()\n return self.data_files\n return orig.build_py.__getattr__(self, attr)\n\n def build_module(self, module, module_file, package):\n if six.PY2 and isinstance(package, six.string_types):\n # avoid errors on Python 2 when unicode is passed (#190)\n package = package.split('.')\n outfile, copied = orig.build_py.build_module(self, module, module_file,\n package)\n if copied:\n self.__updated_files.append(outfile)\n return outfile, copied\n\n def _get_data_files(self):\n \"\"\"Generate list of '(package,src_dir,build_dir,filenames)' tuples\"\"\"\n self.analyze_manifest()\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def _get_pkg_data_files(self, package):\n # Locate package source directory\n src_dir = self.get_package_dir(package)\n\n # Compute package build directory\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\n\n # Strip directory from globbed filenames\n filenames = [\n os.path.relpath(file, src_dir)\n for file in self.find_data_files(package, src_dir)\n ]\n return package, src_dir, build_dir, filenames\n\n def find_data_files(self, package, src_dir):\n \"\"\"Return filenames for package's data files in 'src_dir'\"\"\"\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)\n\n def build_package_data(self):\n \"\"\"Copy data files into build directory\"\"\"\n for package, src_dir, build_dir, filenames in self.data_files:\n for filename in filenames:\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n outf, copied = self.copy_file(srcfile, target)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n self.__doctests_2to3.append(outf)\n\n def analyze_manifest(self):\n self.manifest_files = mf = {}\n if not self.distribution.include_package_data:\n return\n src_dirs = {}\n for package in self.packages or ():\n # Locate package source directory\n src_dirs[assert_relative(self.get_package_dir(package))] = package\n\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n for path in ei_cmd.filelist.files:\n d, f = os.path.split(assert_relative(path))\n prev = None\n oldf = f\n while d and d != prev and d not in src_dirs:\n prev = d\n d, df = os.path.split(d)\n f = os.path.join(df, f)\n if d in src_dirs:\n if path.endswith('.py') and f == oldf:\n continue # it's a module, not data\n mf.setdefault(src_dirs[d], []).append(path)\n\n def get_data_files(self):\n pass # Lazily compute data files in _get_data_files() function.\n\n def check_package(self, package, package_dir):\n \"\"\"Check namespace packages' __init__ for declare_namespace\"\"\"\n try:\n return self.packages_checked[package]\n except KeyError:\n pass\n\n init_py = orig.build_py.check_package(self, package, package_dir)\n self.packages_checked[package] = init_py\n\n if not init_py or not 
self.distribution.namespace_packages:\n return init_py\n\n for pkg in self.distribution.namespace_packages:\n if pkg == package or pkg.startswith(package + '.'):\n break\n else:\n return init_py\n\n with io.open(init_py, 'rb') as f:\n contents = f.read()\n if b'declare_namespace' not in contents:\n raise distutils.errors.DistutilsError(\n \"Namespace package problem: %s is a namespace package, but \"\n \"its\\n__init__.py does not call declare_namespace()! Please \"\n 'fix it.\\n(See the setuptools manual under '\n '\"Namespace Packages\" for details.)\\n\"' % (package,)\n )\n return init_py\n\n def initialize_options(self):\n self.packages_checked = {}\n orig.build_py.initialize_options(self)\n\n def get_package_dir(self, package):\n res = orig.build_py.get_package_dir(self, package)\n if self.distribution.src_root is not None:\n return os.path.join(self.distribution.src_root, res)\n return res\n\n def exclude_data_files(self, package, src_dir, files):\n \"\"\"Filter filenames for package's data files in 'src_dir'\"\"\"\n files = list(files)\n patterns = self._get_platform_patterns(\n self.exclude_package_data,\n package,\n src_dir,\n )\n match_groups = (\n fnmatch.filter(files, pattern)\n for pattern in patterns\n )\n # flatten the groups of matches into an iterable of matches\n matches = itertools.chain.from_iterable(match_groups)\n bad = set(matches)\n keepers = (\n fn\n for fn in files\n if fn not in bad\n )\n # ditch dupes\n return list(_unique_everseen(keepers))\n\n @staticmethod\n def _get_platform_patterns(spec, package, src_dir):\n \"\"\"\n yield platform-specific path patterns (suitable for glob\n or fn_match) from a glob-based spec (such as\n self.package_data or self.exclude_package_data)\n matching package in src_dir.\n \"\"\"\n raw_patterns = itertools.chain(\n spec.get('', []),\n spec.get(package, []),\n )\n return (\n # Each pattern has to be converted to a platform-specific path\n os.path.join(src_dir, convert_path(pattern))\n for pattern in raw_patterns\n )\n\n\n# from Python docs\ndef _unique_everseen(iterable, key=None):\n \"List unique elements, preserving order. 
Remember all elements ever seen.\"\n # unique_everseen('AAAABBBCCDAABBB') --> A B C D\n # unique_everseen('ABBCcAD', str.lower) --> A B C D\n seen = set()\n seen_add = seen.add\n if key is None:\n for element in filterfalse(seen.__contains__, iterable):\n seen_add(element)\n yield element\n else:\n for element in iterable:\n k = key(element)\n if k not in seen:\n seen_add(k)\n yield element\n\n\ndef assert_relative(path):\n if not os.path.isabs(path):\n return path\n from distutils.errors import DistutilsSetupError\n\n msg = textwrap.dedent(\"\"\"\n Error: setup script specifies an absolute path:\n\n %s\n\n setup() arguments must *always* be /-separated paths relative to the\n setup.py directory, *never* absolute paths.\n \"\"\").lstrip() % path\n raise DistutilsSetupError(msg)\n", "path": "setuptools/command/build_py.py"}], "after_files": [{"content": "from glob import glob\nfrom distutils.util import convert_path\nimport distutils.command.build_py as orig\nimport os\nimport fnmatch\nimport textwrap\nimport io\nimport distutils.errors\nimport itertools\n\nfrom setuptools.extern import six\nfrom setuptools.extern.six.moves import map, filter, filterfalse\n\ntry:\n from setuptools.lib2to3_ex import Mixin2to3\nexcept ImportError:\n\n class Mixin2to3:\n def run_2to3(self, files, doctests=True):\n \"do nothing\"\n\n\nclass build_py(orig.build_py, Mixin2to3):\n \"\"\"Enhanced 'build_py' command that includes data files with packages\n\n The data files are specified via a 'package_data' argument to 'setup()'.\n See 'setuptools.dist.Distribution' for more details.\n\n Also, this version of the 'build_py' command allows you to specify both\n 'py_modules' and 'packages' in the same setup operation.\n \"\"\"\n\n def finalize_options(self):\n orig.build_py.finalize_options(self)\n self.package_data = self.distribution.package_data\n self.exclude_package_data = (self.distribution.exclude_package_data or\n {})\n if 'data_files' in self.__dict__:\n del self.__dict__['data_files']\n self.__updated_files = []\n self.__doctests_2to3 = []\n\n def run(self):\n \"\"\"Build modules, packages, and copy data files to build directory\"\"\"\n if not self.py_modules and not self.packages:\n return\n\n if self.py_modules:\n self.build_modules()\n\n if self.packages:\n self.build_packages()\n self.build_package_data()\n\n self.run_2to3(self.__updated_files, False)\n self.run_2to3(self.__updated_files, True)\n self.run_2to3(self.__doctests_2to3, True)\n\n # Only compile actual .py files, using our base class' idea of what our\n # output files are.\n self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))\n\n def __getattr__(self, attr):\n \"lazily compute data files\"\n if attr == 'data_files':\n self.data_files = self._get_data_files()\n return self.data_files\n return orig.build_py.__getattr__(self, attr)\n\n def build_module(self, module, module_file, package):\n if six.PY2 and isinstance(package, six.string_types):\n # avoid errors on Python 2 when unicode is passed (#190)\n package = package.split('.')\n outfile, copied = orig.build_py.build_module(self, module, module_file,\n package)\n if copied:\n self.__updated_files.append(outfile)\n return outfile, copied\n\n def _get_data_files(self):\n \"\"\"Generate list of '(package,src_dir,build_dir,filenames)' tuples\"\"\"\n self.analyze_manifest()\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def _get_pkg_data_files(self, package):\n # Locate package source directory\n src_dir = self.get_package_dir(package)\n\n # Compute 
package build directory\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\n\n # Strip directory from globbed filenames\n filenames = [\n os.path.relpath(file, src_dir)\n for file in self.find_data_files(package, src_dir)\n ]\n return package, src_dir, build_dir, filenames\n\n def find_data_files(self, package, src_dir):\n \"\"\"Return filenames for package's data files in 'src_dir'\"\"\"\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)\n\n def build_package_data(self):\n \"\"\"Copy data files into build directory\"\"\"\n for package, src_dir, build_dir, filenames in self.data_files:\n for filename in filenames:\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n outf, copied = self.copy_file(srcfile, target, preserve_mode=False)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n self.__doctests_2to3.append(outf)\n\n def analyze_manifest(self):\n self.manifest_files = mf = {}\n if not self.distribution.include_package_data:\n return\n src_dirs = {}\n for package in self.packages or ():\n # Locate package source directory\n src_dirs[assert_relative(self.get_package_dir(package))] = package\n\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n for path in ei_cmd.filelist.files:\n d, f = os.path.split(assert_relative(path))\n prev = None\n oldf = f\n while d and d != prev and d not in src_dirs:\n prev = d\n d, df = os.path.split(d)\n f = os.path.join(df, f)\n if d in src_dirs:\n if path.endswith('.py') and f == oldf:\n continue # it's a module, not data\n mf.setdefault(src_dirs[d], []).append(path)\n\n def get_data_files(self):\n pass # Lazily compute data files in _get_data_files() function.\n\n def check_package(self, package, package_dir):\n \"\"\"Check namespace packages' __init__ for declare_namespace\"\"\"\n try:\n return self.packages_checked[package]\n except KeyError:\n pass\n\n init_py = orig.build_py.check_package(self, package, package_dir)\n self.packages_checked[package] = init_py\n\n if not init_py or not self.distribution.namespace_packages:\n return init_py\n\n for pkg in self.distribution.namespace_packages:\n if pkg == package or pkg.startswith(package + '.'):\n break\n else:\n return init_py\n\n with io.open(init_py, 'rb') as f:\n contents = f.read()\n if b'declare_namespace' not in contents:\n raise distutils.errors.DistutilsError(\n \"Namespace package problem: %s is a namespace package, but \"\n \"its\\n__init__.py does not call declare_namespace()! 
Please \"\n 'fix it.\\n(See the setuptools manual under '\n '\"Namespace Packages\" for details.)\\n\"' % (package,)\n )\n return init_py\n\n def initialize_options(self):\n self.packages_checked = {}\n orig.build_py.initialize_options(self)\n\n def get_package_dir(self, package):\n res = orig.build_py.get_package_dir(self, package)\n if self.distribution.src_root is not None:\n return os.path.join(self.distribution.src_root, res)\n return res\n\n def exclude_data_files(self, package, src_dir, files):\n \"\"\"Filter filenames for package's data files in 'src_dir'\"\"\"\n files = list(files)\n patterns = self._get_platform_patterns(\n self.exclude_package_data,\n package,\n src_dir,\n )\n match_groups = (\n fnmatch.filter(files, pattern)\n for pattern in patterns\n )\n # flatten the groups of matches into an iterable of matches\n matches = itertools.chain.from_iterable(match_groups)\n bad = set(matches)\n keepers = (\n fn\n for fn in files\n if fn not in bad\n )\n # ditch dupes\n return list(_unique_everseen(keepers))\n\n @staticmethod\n def _get_platform_patterns(spec, package, src_dir):\n \"\"\"\n yield platform-specific path patterns (suitable for glob\n or fn_match) from a glob-based spec (such as\n self.package_data or self.exclude_package_data)\n matching package in src_dir.\n \"\"\"\n raw_patterns = itertools.chain(\n spec.get('', []),\n spec.get(package, []),\n )\n return (\n # Each pattern has to be converted to a platform-specific path\n os.path.join(src_dir, convert_path(pattern))\n for pattern in raw_patterns\n )\n\n\n# from Python docs\ndef _unique_everseen(iterable, key=None):\n \"List unique elements, preserving order. Remember all elements ever seen.\"\n # unique_everseen('AAAABBBCCDAABBB') --> A B C D\n # unique_everseen('ABBCcAD', str.lower) --> A B C D\n seen = set()\n seen_add = seen.add\n if key is None:\n for element in filterfalse(seen.__contains__, iterable):\n seen_add(element)\n yield element\n else:\n for element in iterable:\n k = key(element)\n if k not in seen:\n seen_add(k)\n yield element\n\n\ndef assert_relative(path):\n if not os.path.isabs(path):\n return path\n from distutils.errors import DistutilsSetupError\n\n msg = textwrap.dedent(\"\"\"\n Error: setup script specifies an absolute path:\n\n %s\n\n setup() arguments must *always* be /-separated paths relative to the\n setup.py directory, *never* absolute paths.\n \"\"\").lstrip() % path\n raise DistutilsSetupError(msg)\n", "path": "setuptools/command/build_py.py"}]} | 3,414 | 147 |
gh_patches_debug_62498 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django debug toolbar is not working
I just noticed that while working in my development environment (devcontainer setup), the Django debug toolbar doesn't show. Perhaps this was a result of #1454? 
--- END ISSUE ---
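For context, an illustrative sketch (not part of the original report): django-debug-toolbar only renders when its show-toolbar check passes, and the default check typically requires `DEBUG = True` plus the request's `REMOTE_ADDR` being listed in `INTERNAL_IPS`. Inside a devcontainer the request usually arrives from the Docker gateway rather than `127.0.0.1`, so that check can silently hide the toolbar even though the app is installed and the middleware is loaded. One common workaround is an explicit callback:

```python
# Hypothetical addition to settings.py (assumes DEBUG and DEBUG_TOOLBAR_CONFIG
# are defined as in the file below): show the toolbar whenever DEBUG is on,
# regardless of the client address, sidestepping the INTERNAL_IPS check that a
# containerised request would otherwise fail.
DEBUG_TOOLBAR_CONFIG["SHOW_TOOLBAR_CALLBACK"] = lambda request: DEBUG
```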
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/cantusdb/settings.py`
Content:
```
1 """
2 Django settings for cantusdb project.
3
4 Generated by 'django-admin startproject' using Django 3.0.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.0/ref/settings/
11 """
12
13 import os
14 from distutils.util import strtobool
15 from django.contrib.messages import constants as messages
16
17 # https://ordinarycoders.com/blog/article/django-messages-framework
18 MESSAGE_TAGS = {
19 messages.DEBUG: "alert-secondary",
20 messages.INFO: "alert-info",
21 messages.SUCCESS: "alert-success",
22 messages.WARNING: "alert-warning",
23 messages.ERROR: "alert-danger",
24 }
25
26 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
27 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
28
29 STATIC_ROOT = os.getenv("CANTUSDB_STATIC_ROOT")
30 MEDIA_ROOT = os.getenv("CANTUSDB_MEDIA_ROOT")
31
32 # Quick-start development settings - unsuitable for production
33 # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
34
35 # SECURITY WARNING: keep the secret key used in production secret!
36 SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
37
38 PROJECT_ENVIRONMENT = os.getenv("PROJECT_ENVIRONMENT")
39
40 # SECURITY WARNING: don't run with debug turned on in production!
41 DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT=="DEVELOPMENT"
42
43 ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOST")]
44 CSRF_TRUSTED_ORIGINS = [f'https://{os.getenv("CANTUSDB_HOST")}']
45 if PROJECT_ENVIRONMENT == "DEVELOPMENT":
46 DEBUG = True
47
48 # Application definition
49
50 INSTALLED_APPS = [
51 "dal",
52 "dal_select2",
53 "django.contrib.admin",
54 "django.contrib.auth",
55 "django.contrib.contenttypes",
56 "django.contrib.sessions",
57 "django.contrib.messages",
58 "django.contrib.staticfiles",
59 "django.contrib.sites",
60 "django.contrib.flatpages",
61 "django.contrib.humanize",
62 "django.contrib.postgres",
63 "extra_views",
64 "main_app",
65 "articles",
66 "django_quill", # to provide rich-text field for articles
67 "reversion", # django-reversion, for version history of objects in database
68 "users",
69 ]
70
71 MIDDLEWARE = [
72 "django.middleware.security.SecurityMiddleware",
73 "django.contrib.sessions.middleware.SessionMiddleware",
74 "django.middleware.common.CommonMiddleware",
75 "django.middleware.csrf.CsrfViewMiddleware",
76 "django.contrib.auth.middleware.AuthenticationMiddleware",
77 "django.contrib.messages.middleware.MessageMiddleware",
78 "django.middleware.clickjacking.XFrameOptionsMiddleware",
79 "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
80 "reversion.middleware.RevisionMiddleware",
81 ]
82
83 ROOT_URLCONF = "cantusdb.urls"
84
85 TEMPLATES = [
86 {
87 "BACKEND": "django.template.backends.django.DjangoTemplates",
88 "DIRS": [os.path.join(BASE_DIR, "templates")],
89 "APP_DIRS": True,
90 "OPTIONS": {
91 "context_processors": [
92 "django.template.context_processors.debug",
93 "django.template.context_processors.request",
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 "main_app.context_processors.determine_project_environment",
97 ],
98 },
99 },
100 ]
101
102 TEMPLATE_LOADERS = "django.template.loaders.app_directories.load_template_source"
103
104 WSGI_APPLICATION = "cantusdb.wsgi.application"
105
106
107 # Database
108 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
109
110 DATABASES = {
111 "default": {
112 "ENGINE": "django.db.backends.postgresql",
113 "NAME": os.getenv("POSTGRES_DB"),
114 "USER": os.getenv("POSTGRES_USER"),
115 "HOST": os.getenv("POSTGRES_HOST"),
116 "PORT": os.getenv("POSTGRES_PORT"),
117 "PASSWORD": os.getenv("POSTGRES_PASSWORD"),
118 }
119 }
120
121
122 # Password validation
123 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
124
125 AUTH_PASSWORD_VALIDATORS = [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139
140
141 # Internationalization
142 # https://docs.djangoproject.com/en/3.0/topics/i18n/
143
144 LANGUAGE_CODE = "en-us"
145
146 TIME_ZONE = "UTC"
147
148 USE_I18N = True
149
150 USE_L10N = True
151
152 USE_TZ = True
153
154
155 # Static files (CSS, JavaScript, Images)
156 # https://docs.djangoproject.com/en/3.0/howto/static-files/
157
158 STATIC_URL = "/static/"
159
160 STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
161
162 AUTH_USER_MODEL = "users.User"
163 LOGIN_REDIRECT_URL = "/"
164 LOGIN_URL = "/login/"
165 LOGOUT_REDIRECT_URL = "/login/"
166
167 SITE_ID = 4
168
169 # New in django 3.2: specify the default type of auto-created primary keys
170 # https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
171 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
172
173 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
174 EMAIL_HOST = "email-smtp.us-west-2.amazonaws.com"
175 EMAIL_PORT = 587
176 EMAIL_HOST_USER = os.getenv("AWS_EMAIL_HOST_USER")
177 EMAIL_HOST_PASSWORD = os.getenv("AWS_EMAIL_HOST_PASSWORD")
178 EMAIL_USE_TLS = True
179
180 DEFAULT_FROM_EMAIL = "[email protected]"
181
182 # automatically disable all panels which user can then manually enable
183 DEBUG_TOOLBAR_CONFIG = {
184 "DISABLE_PANELS": {
185 "debug_toolbar.panels.history.HistoryPanel",
186 "debug_toolbar.panels.versions.VersionsPanel",
187 "debug_toolbar.panels.timer.TimerPanel",
188 "debug_toolbar.panels.settings.SettingsPanel",
189 "debug_toolbar.panels.headers.HeadersPanel",
190 "debug_toolbar.panels.request.RequestPanel",
191 "debug_toolbar.panels.sql.SQLPanel",
192 "debug_toolbar.panels.staticfiles.StaticFilesPanel",
193 "debug_toolbar.panels.templates.TemplatesPanel",
194 "debug_toolbar.panels.cache.CachePanel",
195 "debug_toolbar.panels.signals.SignalsPanel",
196 "debug_toolbar.panels.logging.LoggingPanel",
197 "debug_toolbar.panels.redirects.RedirectsPanel",
198 "debug_toolbar.panels.profiling.ProfilingPanel",
199 },
200 }
201
202 INTERNAL_IPS = [
203 "127.0.0.1",
204 ]
205
206 if DEBUG:
207 INSTALLED_APPS.append("debug_toolbar")
208 # debug toolbar must be inserted as early in the middleware as possible
209 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py
--- a/django/cantusdb_project/cantusdb/settings.py
+++ b/django/cantusdb_project/cantusdb/settings.py
@@ -197,6 +197,9 @@
"debug_toolbar.panels.redirects.RedirectsPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
},
+ "SHOW_TOOLBAR_CALLBACK": lambda request: (
+ False if request.headers.get("x-requested-with") == "XMLHttpRequest" else True
+ ),
}
INTERNAL_IPS = [
| {"golden_diff": "diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py\n--- a/django/cantusdb_project/cantusdb/settings.py\n+++ b/django/cantusdb_project/cantusdb/settings.py\n@@ -197,6 +197,9 @@\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n+ \"SHOW_TOOLBAR_CALLBACK\": lambda request: (\n+ False if request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\" else True\n+ ),\n }\n \n INTERNAL_IPS = [\n", "issue": "Django debug toolbar is not working\nI just noticed that while working in my development environment (devcontainer setup), the Django debug toolbar doesn't show. Perhaps this was as a result of #1454? \n", "before_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\nPROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n\nALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOST\")]\nCSRF_TRUSTED_ORIGINS = [f'https://{os.getenv(\"CANTUSDB_HOST\")}']\nif PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n DEBUG = True\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = 
\"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n },\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nif DEBUG:\n 
INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\nPROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n\nALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOST\")]\nCSRF_TRUSTED_ORIGINS = [f'https://{os.getenv(\"CANTUSDB_HOST\")}']\nif PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n DEBUG = True\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = \"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n 
},\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n \"SHOW_TOOLBAR_CALLBACK\": lambda request: (\n False if request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\" else True\n ),\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nif DEBUG:\n INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}]} | 2,335 | 148 |
gh_patches_debug_6639 | rasdani/github-patches | git_diff | ethereum__web3.py-2217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using typing-extensions 4 with web3.py and python < 3.8
Hey guys,
This is a question about the typing-extensions package. In setup.py https://github.com/ethereum/web3.py/blob/74fbcd1761407ee37808173b089d8e44e537fb99/setup.py#L88-L89
you require <4 for python <3.8. From what I understand, that is because the extensions you use have made it into the stdlib's typing module from 3.8 onward.
But from what I see, this requirement creates a problem for projects like ours that are still on Python 3.7, use web3.py, and want to use typing-extensions 4. Is there any reason for the < 4 requirement?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8 extras_require = {
9 'tester': [
10 "eth-tester[py-evm]==v0.6.0-beta.4",
11 "py-geth>=3.6.0,<4",
12 ],
13 'linter': [
14 "flake8==3.8.3",
15 "isort>=4.2.15,<4.3.5",
16 "mypy==0.812",
17 ],
18 'docs': [
19 "mock",
20 "sphinx-better-theme>=0.1.4",
21 "click>=5.1",
22 "configparser==3.5.0",
23 "contextlib2>=0.5.4",
24 "py-geth>=3.6.0,<4",
25 "py-solc>=0.4.0",
26 "pytest>=4.4.0,<5.0.0",
27 "sphinx>=3.0,<4",
28 "sphinx_rtd_theme>=0.1.9",
29 "toposort>=1.4",
30 "towncrier==18.5.0",
31 "urllib3",
32 "wheel"
33 ],
34 'dev': [
35 "bumpversion",
36 "flaky>=3.7.0,<4",
37 "hypothesis>=3.31.2,<6",
38 "pytest>=4.4.0,<5.0.0",
39 "pytest-asyncio>=0.10.0,<0.11",
40 "pytest-mock>=1.10,<2",
41 "pytest-pythonpath>=0.3",
42 "pytest-watch>=4.2,<5",
43 "pytest-xdist>=1.29,<2",
44 "setuptools>=38.6.0",
45 "tox>=1.8.0",
46 "tqdm>4.32,<5",
47 "twine>=1.13,<2",
48 "pluggy==0.13.1",
49 "when-changed>=0.3.0,<0.4"
50 ]
51 }
52
53 extras_require['dev'] = (
54 extras_require['tester']
55 + extras_require['linter']
56 + extras_require['docs']
57 + extras_require['dev']
58 )
59
60 with open('./README.md') as readme:
61 long_description = readme.read()
62
63 setup(
64 name='web3',
65 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
66 version='5.25.0',
67 description="""Web3.py""",
68 long_description_content_type='text/markdown',
69 long_description=long_description,
70 author='Piper Merriam',
71 author_email='[email protected]',
72 url='https://github.com/ethereum/web3.py',
73 include_package_data=True,
74 install_requires=[
75 "aiohttp>=3.7.4.post0,<4",
76 "eth-abi>=2.0.0b6,<3.0.0",
77 "eth-account>=0.5.6,<0.6.0",
78 "eth-hash[pycryptodome]>=0.2.0,<1.0.0",
79 "eth-typing>=2.0.0,<3.0.0",
80 "eth-utils>=1.9.5,<2.0.0",
81 "hexbytes>=0.1.0,<1.0.0",
82 "ipfshttpclient==0.8.0a2",
83 "jsonschema>=3.2.0,<4.0.0",
84 "lru-dict>=1.1.6,<2.0.0",
85 "protobuf>=3.10.0,<4",
86 "pywin32>=223;platform_system=='Windows'",
87 "requests>=2.16.0,<3.0.0",
88 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat
89 "typing-extensions>=3.7.4.1,<4;python_version<'3.8'",
90 "websockets>=9.1,<10",
91 ],
92 python_requires='>=3.6,<4',
93 extras_require=extras_require,
94 py_modules=['web3', 'ens', 'ethpm'],
95 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]},
96 license="MIT",
97 zip_safe=False,
98 keywords='ethereum',
99 packages=find_packages(exclude=["tests", "tests.*"]),
100 package_data={"web3": ["py.typed"]},
101 classifiers=[
102 'Development Status :: 5 - Production/Stable',
103 'Intended Audience :: Developers',
104 'License :: OSI Approved :: MIT License',
105 'Natural Language :: English',
106 'Programming Language :: Python :: 3',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Programming Language :: Python :: 3.8',
110 'Programming Language :: Python :: 3.9',
111 ],
112 )
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -86,7 +86,7 @@
"pywin32>=223;platform_system=='Windows'",
"requests>=2.16.0,<3.0.0",
# remove typing_extensions after python_requires>=3.8, see web3._utils.compat
- "typing-extensions>=3.7.4.1,<4;python_version<'3.8'",
+ "typing-extensions>=3.7.4.1,<5;python_version<'3.8'",
"websockets>=9.1,<10",
],
python_requires='>=3.6,<4',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,7 +86,7 @@\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n- \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n+ \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n", "issue": "Using typing-extensions 4 with web3.py and python < 3.8\nHey guys,\r\n\r\nThis is question on typing-extensions package. In setup.py https://github.com/ethereum/web3.py/blob/74fbcd1761407ee37808173b089d8e44e537fb99/setup.py#L88-L89\r\n\r\nyou require <4 and python <3.8. From what I understand that is since these extensions you use have made it into stdlib's typing module from 3.8 and on.\r\n\r\nBut from what I see this requirement creates a probem for projects like ours which are still in python 3.7, use web3.py and want to use typing-extensions 4. Is there any reason for the < 4 requirement?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.4\",\n \"py-geth>=3.6.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.812\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.6.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.25.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.6,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.4\",\n \"py-geth>=3.6.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.812\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.6.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.25.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.6,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}]} | 1,786 | 158 |
gh_patches_debug_670 | rasdani/github-patches | git_diff | pytorch__examples-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Division error
Training a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization.
- python2.7
- torch==0.1.12.post2
- torchvision==0.1.8
````
Traceback (most recent call last):
File "neural_style/neural_style.py", line 226, in <module>
main()
File "neural_style/neural_style.py", line 220, in main
train(args)
File "neural_style/neural_style.py", line 65, in train
style_v = utils.normalize_batch(style_v)
File "/home/paperspace/embro/neural_style/utils.py", line 42, in normalize_batch
batch /= Variable(std)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 793, in __idiv__
return self.div_(other)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 323, in div_
raise RuntimeError("div_ only supports scalar multiplication")
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fast_neural_style/neural_style/utils.py`
Content:
```
1 import torch
2 from PIL import Image
3 from torch.autograd import Variable
4
5
6 def load_image(filename, size=None, scale=None):
7 img = Image.open(filename)
8 if size is not None:
9 img = img.resize((size, size), Image.ANTIALIAS)
10 elif scale is not None:
11 img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
12 return img
13
14
15 def save_image(filename, data):
16 img = data.clone().clamp(0, 255).numpy()
17 img = img.transpose(1, 2, 0).astype("uint8")
18 img = Image.fromarray(img)
19 img.save(filename)
20
21
22 def gram_matrix(y):
23 (b, ch, h, w) = y.size()
24 features = y.view(b, ch, w * h)
25 features_t = features.transpose(1, 2)
26 gram = features.bmm(features_t) / (ch * h * w)
27 return gram
28
29
30 def normalize_batch(batch):
31 # normalize using imagenet mean and std
32 mean = batch.data.new(batch.data.size())
33 std = batch.data.new(batch.data.size())
34 mean[:, 0, :, :] = 0.485
35 mean[:, 1, :, :] = 0.456
36 mean[:, 2, :, :] = 0.406
37 std[:, 0, :, :] = 0.229
38 std[:, 1, :, :] = 0.224
39 std[:, 2, :, :] = 0.225
40 batch = torch.div(batch, 255.0)
41 batch -= Variable(mean)
42 batch /= Variable(std)
43 return batch
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py
--- a/fast_neural_style/neural_style/utils.py
+++ b/fast_neural_style/neural_style/utils.py
@@ -39,5 +39,5 @@
std[:, 2, :, :] = 0.225
batch = torch.div(batch, 255.0)
batch -= Variable(mean)
- batch /= Variable(std)
+ batch = batch / Variable(std)
return batch
| {"golden_diff": "diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py\n--- a/fast_neural_style/neural_style/utils.py\n+++ b/fast_neural_style/neural_style/utils.py\n@@ -39,5 +39,5 @@\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n- batch /= Variable(std)\n+ batch = batch / Variable(std)\n return batch\n", "issue": "Division error\nTraining a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization. \r\n\r\n- python2.7\r\n- torch==0.1.12.post2\r\n- torchvision==0.1.8\r\n\r\n````\r\nTraceback (most recent call last):\r\n File \"neural_style/neural_style.py\", line 226, in <module>\r\n main()\r\n File \"neural_style/neural_style.py\", line 220, in main\r\n train(args)\r\n File \"neural_style/neural_style.py\", line 65, in train\r\n style_v = utils.normalize_batch(style_v)\r\n File \"/home/paperspace/embro/neural_style/utils.py\", line 42, in normalize_batch\r\n batch /= Variable(std)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 793, in __idiv__\r\n return self.div_(other)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 323, in div_\r\n raise RuntimeError(\"div_ only supports scalar multiplication\")\r\n````\r\n\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom PIL import Image\nfrom torch.autograd import Variable\n\n\ndef load_image(filename, size=None, scale=None):\n img = Image.open(filename)\n if size is not None:\n img = img.resize((size, size), Image.ANTIALIAS)\n elif scale is not None:\n img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)\n return img\n\n\ndef save_image(filename, data):\n img = data.clone().clamp(0, 255).numpy()\n img = img.transpose(1, 2, 0).astype(\"uint8\")\n img = Image.fromarray(img)\n img.save(filename)\n\n\ndef gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / (ch * h * w)\n return gram\n\n\ndef normalize_batch(batch):\n # normalize using imagenet mean and std\n mean = batch.data.new(batch.data.size())\n std = batch.data.new(batch.data.size())\n mean[:, 0, :, :] = 0.485\n mean[:, 1, :, :] = 0.456\n mean[:, 2, :, :] = 0.406\n std[:, 0, :, :] = 0.229\n std[:, 1, :, :] = 0.224\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n batch /= Variable(std)\n return batch\n", "path": "fast_neural_style/neural_style/utils.py"}], "after_files": [{"content": "import torch\nfrom PIL import Image\nfrom torch.autograd import Variable\n\n\ndef load_image(filename, size=None, scale=None):\n img = Image.open(filename)\n if size is not None:\n img = img.resize((size, size), Image.ANTIALIAS)\n elif scale is not None:\n img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)\n return img\n\n\ndef save_image(filename, data):\n img = data.clone().clamp(0, 255).numpy()\n img = img.transpose(1, 2, 0).astype(\"uint8\")\n img = Image.fromarray(img)\n img.save(filename)\n\n\ndef gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / (ch * h * w)\n return gram\n\n\ndef normalize_batch(batch):\n # normalize using imagenet mean and std\n mean = batch.data.new(batch.data.size())\n std = batch.data.new(batch.data.size())\n mean[:, 0, :, :] = 0.485\n mean[:, 1, :, :] = 0.456\n mean[:, 
2, :, :] = 0.406\n std[:, 0, :, :] = 0.229\n std[:, 1, :, :] = 0.224\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n batch = batch / Variable(std)\n return batch\n", "path": "fast_neural_style/neural_style/utils.py"}]} | 983 | 116 |
gh_patches_debug_17674 | rasdani/github-patches | git_diff | feast-dev__feast-1504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.6 Local Mode breaks due to usage of pathlib.Path and sqlite3
## Expected Behavior
Python 3.6 in local mode works when following the quickstart
## Current Behavior
Unable to follow quickstart due to the error as shown below
## Steps to reproduce
1. Create a Python 3.6 environment. E.g. `conda create --name=feast36 python=3.6`
2. Install feast and other deps `pip install feast`
3. Follow the quickstart
When running the quick start it will fail with the following msg.
```
(feast36) ➜ temp_feat$ feast init feat1
Creating a new Feast repository in /home/user/Documents/temp_feat/feat1.
(feast36) ➜ temp_feat$ cd feat1
(feast36) ➜ feat1$ feast apply
Registered entity driver_id
Registered feature view driver_hourly_stats
Deploying infrastructure for driver_hourly_stats
Traceback (most recent call last):
File "/home/user/anaconda3/envs/feast36/bin/feast", line 8, in <module>
sys.exit(cli())
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/cli.py", line 160, in apply_total_command
apply_total(repo_config, Path.cwd())
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/repo_operations.py", line 148, in apply_total
partial=False,
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py", line 55, in update_infra
conn = self._get_conn()
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py", line 45, in _get_conn
self._db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
TypeError: argument 1 must be str, not PosixPath
```
### Specifications
- Version: 3.6
- Platform: Ubuntu 20.04, also tested on Ubuntu 18.05
- Subsystem:
## Possible Solution
The sqlite3 issue is resolved in Python 3.7 as shown here:
https://bugs.python.org/issue33496
A solution could be to add `self._db_path = str(self._db_path)` or similar in the `infra/local.py` file
I couldn't find a similar issue - in the case its resolved in an upstream commit...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import subprocess
18
19 from distutils.cmd import Command
20 from setuptools import find_packages
21
22 try:
23 from setuptools import setup
24 from setuptools.command.install import install
25 from setuptools.command.develop import develop
26 from setuptools.command.egg_info import egg_info
27 from setuptools.command.sdist import sdist
28 from setuptools.command.build_py import build_py
29 except ImportError:
30 from distutils.core import setup
31 from distutils.command.install import install
32 from distutils.command.build_py import build_py
33
34 NAME = "feast"
35 DESCRIPTION = "Python SDK for Feast"
36 URL = "https://github.com/feast-dev/feast"
37 AUTHOR = "Feast"
38 REQUIRES_PYTHON = ">=3.6.0"
39
40 REQUIRED = [
41 "Click==7.*",
42 "colorama>=0.3.9",
43 "fastavro>=0.22.11,<0.23",
44 "google-api-core>=1.23.0",
45 "google-cloud-bigquery>=2.0.*",
46 "google-cloud-bigquery-storage >= 2.0.0",
47 "google-cloud-storage>=1.20.*",
48 "google-cloud-core==1.4.*",
49 "googleapis-common-protos==1.52.*",
50 "grpcio==1.31.0",
51 "Jinja2>=2.0.0",
52 "jsonschema",
53 "mmh3",
54 "numpy<1.20.0",
55 "pandas~=1.0.0",
56 "pandavro==1.5.*",
57 "protobuf>=3.10",
58 "pyarrow==2.0.0",
59 "pydantic>=1.0.0",
60 "PyYAML==5.3.*",
61 "tabulate==0.8.*",
62 "toml==0.10.*",
63 "tqdm==4.*",
64 ]
65
66 CI_REQUIRED = [
67 "cryptography==3.3.2",
68 "flake8",
69 "black==19.10b0",
70 "isort>=5",
71 "grpcio-tools==1.31.0",
72 "grpcio-testing==1.31.0",
73 "mock==2.0.0",
74 "moto",
75 "mypy==0.790",
76 "mypy-protobuf==1.24",
77 "avro==1.10.0",
78 "gcsfs",
79 "urllib3>=1.25.4",
80 "pytest==6.0.0",
81 "pytest-lazy-fixture==0.6.3",
82 "pytest-timeout==1.4.2",
83 "pytest-ordering==0.6.*",
84 "pytest-mock==1.10.4",
85 "Sphinx",
86 "sphinx-rtd-theme",
87 "tenacity",
88 "adlfs==0.5.9",
89 "firebase-admin==4.5.2",
90 "google-cloud-datastore==2.1.0",
91 "pre-commit"
92 ]
93
94 # README file from Feast repo root directory
95 repo_root = (
96 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
97 .communicate()[0]
98 .rstrip()
99 .decode("utf-8")
100 )
101 README_FILE = os.path.join(repo_root, "README.md")
102 with open(os.path.join(README_FILE), "r") as f:
103 LONG_DESCRIPTION = f.read()
104
105 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
106 # Regex modified from default tag regex in:
107 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
108 TAG_REGEX = re.compile(
109 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
110 )
111
112
113 class BuildProtoCommand(Command):
114 description = "Builds the proto files into python files."
115
116 def initialize_options(self):
117 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
118 self.proto_folder = os.path.join(repo_root, "protos")
119 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
120 self.sub_folders = ["core", "serving", "types", "storage"]
121
122 def finalize_options(self):
123 pass
124
125 def _generate_protos(self, path):
126 proto_files = glob.glob(os.path.join(self.proto_folder, path))
127
128 subprocess.check_call(self.protoc + [
129 '-I', self.proto_folder,
130 '--python_out', self.this_package,
131 '--grpc_python_out', self.this_package,
132 '--mypy_out', self.this_package] + proto_files)
133
134 def run(self):
135 for sub_folder in self.sub_folders:
136 self._generate_protos(f'feast/{sub_folder}/*.proto')
137
138 from pathlib import Path
139
140 for path in Path('feast/protos').rglob('*.py'):
141 for folder in self.sub_folders:
142 # Read in the file
143 with open(path, 'r') as file:
144 filedata = file.read()
145
146 # Replace the target string
147 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
148
149 # Write the file out again
150 with open(path, 'w') as file:
151 file.write(filedata)
152
153
154 class BuildCommand(build_py):
155 """Custom build command."""
156
157 def run(self):
158 self.run_command('build_proto')
159 build_py.run(self)
160
161
162 class DevelopCommand(develop):
163 """Custom develop command."""
164
165 def run(self):
166 self.run_command('build_proto')
167 develop.run(self)
168
169
170 setup(
171 name=NAME,
172 author=AUTHOR,
173 description=DESCRIPTION,
174 long_description=LONG_DESCRIPTION,
175 long_description_content_type="text/markdown",
176 python_requires=REQUIRES_PYTHON,
177 url=URL,
178 packages=find_packages(exclude=("tests",)),
179 install_requires=REQUIRED,
180 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
181 # Install dev requirements with: pip install -e .[dev]
182 extras_require={
183 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
184 "ci": CI_REQUIRED
185 },
186 include_package_data=True,
187 license="Apache",
188 classifiers=[
189 # Trove classifiers
190 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
191 "License :: OSI Approved :: Apache Software License",
192 "Programming Language :: Python",
193 "Programming Language :: Python :: 3",
194 "Programming Language :: Python :: 3.6",
195 ],
196 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
197 use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
198 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.31.0", "mypy-protobuf", "sphinx"],
199 package_data={
200 "": [
201 "protos/feast/**/*.proto",
202 "protos/feast/third_party/grpc/health/v1/*.proto",
203 "protos/tensorflow_metadata/proto/v0/*.proto",
204 "feast/protos/feast/**/*.py",
205 "tensorflow_metadata/proto/v0/*.py"
206 ],
207 },
208 cmdclass={
209 "build_proto": BuildProtoCommand,
210 "build_py": BuildCommand,
211 "develop": DevelopCommand,
212 },
213 )
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -35,7 +35,7 @@
DESCRIPTION = "Python SDK for Feast"
URL = "https://github.com/feast-dev/feast"
AUTHOR = "Feast"
-REQUIRES_PYTHON = ">=3.6.0"
+REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = [
"Click==7.*",
@@ -191,7 +191,7 @@
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -35,7 +35,7 @@\n DESCRIPTION = \"Python SDK for Feast\"\n URL = \"https://github.com/feast-dev/feast\"\n AUTHOR = \"Feast\"\n-REQUIRES_PYTHON = \">=3.6.0\"\n+REQUIRES_PYTHON = \">=3.7.0\"\n \n REQUIRED = [\n \"Click==7.*\",\n@@ -191,7 +191,7 @@\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n", "issue": "Python 3.6 Local Mode breaks due to usage of pathlib.Path and sqlite3\n## Expected Behavior \r\n\r\nPython 3.6 in local mode works when following the quickstart \r\n\r\n## Current Behavior\r\n\r\nUnable to follow quickstart due to the error as shown below\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a Python 3.6 environment. E.g. `conda create --name=feast36 python=3.6`\r\n2. Install feast and other deps `pip install feast`\r\n3. Follow the quickstart\r\n\r\nWhen running the quick start it will fail with the following msg.\r\n\r\n```\r\n(feast36) \u279c temp_feat$ feast init feat1\r\n\r\nCreating a new Feast repository in /home/user/Documents/temp_feat/feat1.\r\n\r\n(feast36) \u279c temp_feat$ cd feat1\r\n(feast36) \u279c feat1$ feast apply\r\nRegistered entity driver_id\r\nRegistered feature view driver_hourly_stats\r\nDeploying infrastructure for driver_hourly_stats\r\nTraceback (most recent call last):\r\n File \"/home/user/anaconda3/envs/feast36/bin/feast\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/cli.py\", line 160, in apply_total_command\r\n apply_total(repo_config, Path.cwd())\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/repo_operations.py\", line 148, in apply_total\r\n partial=False,\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py\", line 55, in update_infra\r\n conn = self._get_conn()\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py\", line 45, in _get_conn\r\n self._db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES\r\nTypeError: argument 1 must be str, not PosixPath\r\n```\r\n\r\n\r\n### Specifications\r\n\r\n- Version: 3.6\r\n- Platform: Ubuntu 20.04, also tested on Ubuntu 18.05\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\r\nThe sqlite3 issue is resolved in Python 3.7 as shown here:\r\nhttps://bugs.python.org/issue33496\r\n\r\nA 
solution could be to add `self._db_path = str(self._db_path)` or similar in the `infra/local.py` file\r\n\r\nI couldn't find a similar issue - in the case its resolved in an upstream commit...\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio==1.31.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy<1.20.0\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow==2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.31.0\",\n \"grpcio-testing==1.31.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"google-cloud-datastore==2.1.0\",\n \"pre-commit\"\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python 
files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.31.0\", \"mypy-protobuf\", \"sphinx\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio==1.31.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy<1.20.0\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow==2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.31.0\",\n \"grpcio-testing==1.31.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"google-cloud-datastore==2.1.0\",\n \"pre-commit\"\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def 
run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.31.0\", \"mypy-protobuf\", \"sphinx\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]} | 3,484 | 221 |