problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
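The rows below are raw cell dumps in the column order listed above. As a minimal, hypothetical usage sketch (the `rasdani/github-patches` repository id is taken from the `source` column and the `train` split name is an assumption; adjust both if the data is hosted under a different name), the columns could be read with the `datasets` library:

```python
# Hypothetical sketch: load the rows and inspect the columns named in the header.
# The repository id and split below are assumptions taken from the `source`
# column of this dump, not a confirmed Hub location.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:300])        # issue text plus the relevant source files
print(row["golden_diff"][:300])   # reference patch in git diff format

# verification_info is a JSON string; keys seen in this dump:
# golden_diff, issue, before_files, after_files
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```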
gh_patches_debug_40410 | rasdani/github-patches | git_diff | fidals__shopelectro-909 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test update_pack command
Created in #864
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/models.py`
Content:
```
1 import enum
2 import random
3 import string
4 import typing
5 from uuid import uuid4
6
7 from django.conf import settings
8 from django.db import models
9 from django.urls import reverse
10 from django.utils.translation import ugettext_lazy as _
11
12 from catalog import models as catalog_models
13 from ecommerce import models as ecommerce_models
14 from pages import models as pages_models
15
16
17 def randomize_slug(slug: str) -> str:
18 slug_hash = ''.join(
19 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
20 )
21 return f'{slug}_{slug_hash}'
22
23
24 class SECategoryQuerySet(catalog_models.CategoryQuerySet):
25 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
26 categories_with_pictures = (
27 self
28 .filter(products__page__images__isnull=False)
29 .distinct()
30 )
31
32 return categories_with_pictures.get_ancestors(include_self=True)
33
34
35 class SECategoryManager(
36 catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)
37 ):
38 pass
39
40
41 class Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):
42
43 objects = SECategoryManager()
44 uuid = models.UUIDField(default=uuid4, editable=False)
45
46 @classmethod
47 def get_default_parent(cls):
48 return pages_models.CustomPage.objects.filter(slug='catalog').first()
49
50 @property
51 def image(self):
52 products = self.products.all()
53 return products[0].image if products else None
54
55 def get_absolute_url(self):
56 return reverse('category', args=(self.page.slug,))
57
58
59 class Product(
60 catalog_models.AbstractProduct,
61 catalog_models.AbstractPosition,
62 pages_models.SyncPageMixin
63 ):
64
65 # That's why we are needed to explicitly add objects manager here
66 # because of Django special managers behaviour.
67 # Se se#480 for details.
68 objects = catalog_models.ProductManager()
69
70 category = models.ForeignKey(
71 Category,
72 on_delete=models.CASCADE,
73 null=True,
74 related_name='products',
75 verbose_name=_('category'),
76 )
77
78 tags = models.ManyToManyField(
79 'Tag',
80 related_name='products',
81 blank=True,
82 verbose_name=_('tags'),
83 )
84
85 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
86 uuid = models.UUIDField(default=uuid4, editable=False)
87 purchase_price = models.FloatField(
88 default=0, verbose_name=_('purchase_price'))
89 wholesale_small = models.FloatField(
90 default=0, verbose_name=_('wholesale_small'))
91 wholesale_medium = models.FloatField(
92 default=0, verbose_name=_('wholesale_medium'))
93 wholesale_large = models.FloatField(
94 default=0, verbose_name=_('wholesale_large'))
95
96 in_pack = models.PositiveSmallIntegerField(
97 default=1,
98 verbose_name=_('in pack'),
99 )
100
101 def get_absolute_url(self):
102 return reverse('product', args=(self.vendor_code,))
103
104 @property
105 def average_rate(self):
106 """Return rounded to first decimal averaged rating."""
107 rating = self.product_feedbacks.aggregate(
108 avg=models.Avg('rating')).get('avg', 0)
109 return round(rating, 1)
110
111 @property
112 def feedback_count(self):
113 return self.product_feedbacks.count()
114
115 @property
116 def feedback(self):
117 return self.product_feedbacks.all().order_by('-date')
118
119 def get_params(self):
120 return Tag.objects.filter_by_products([self]).group_tags()
121
122 def get_brand_name(self) -> str:
123 brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
124 return brand.name if brand else ''
125
126
127 class ProductFeedback(models.Model):
128 product = models.ForeignKey(
129 Product, on_delete=models.CASCADE, null=True,
130 related_name='product_feedbacks'
131 )
132
133 date = models.DateTimeField(
134 auto_now=True, db_index=True, verbose_name=_('date'))
135 name = models.CharField(
136 max_length=255, db_index=True, verbose_name=_('name'))
137 rating = models.PositiveSmallIntegerField(
138 default=1, db_index=True, verbose_name=_('rating'))
139 dignities = models.TextField(
140 default='', blank=True, verbose_name=_('dignities'))
141 limitations = models.TextField(
142 default='', blank=True, verbose_name=_('limitations'))
143 general = models.TextField(
144 default='', blank=True, verbose_name=_('limitations'))
145
146
147 class ItemsEnum(enum.EnumMeta):
148 """
149 Provide dict-like `items` method.
150
151 https://docs.python.org/3/library/enum.html#enum-classes
152 """
153
154 def items(self):
155 return [(i.name, i.value) for i in self]
156
157 def __repr__(self):
158 fields = ', '.join(i.name for i in self)
159 return f"<enum '{self.__name__}: {fields}'>"
160
161
162 class PaymentOptions(enum.Enum, metaclass=ItemsEnum):
163 cash = 'Наличные'
164 cashless = 'Безналичные и денежные переводы'
165 AC = 'Банковская карта'
166 PC = 'Яндекс.Деньги'
167 GP = 'Связной (терминал)'
168 AB = 'Альфа-Клик'
169
170 @staticmethod
171 def default():
172 return PaymentOptions.cash
173
174
175 class Order(ecommerce_models.Order):
176 address = models.TextField(blank=True, default='')
177 payment_type = models.CharField(
178 max_length=255,
179 choices=PaymentOptions.items(),
180 default=PaymentOptions.default().name,
181 )
182 comment = models.TextField(blank=True, default='')
183 # total price - total purchase price
184 revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))
185
186 @property
187 def payment_type_label(self):
188 """Return label for an order's payment option."""
189 return PaymentOptions[self.payment_type].value
190
191 def set_positions(self, cart):
192 """
193 Save cart's state into Order instance.
194
195 @todo #589:60m Create Cart model.
196 See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672
197 """
198 self.revenue = cart.total_revenue()
199 self.save()
200 for id_, position in cart:
201 self.positions.create(
202 order=self,
203 product_id=id_,
204 vendor_code=position['vendor_code'],
205 name=position['name'],
206 price=position['price'],
207 quantity=position['quantity'],
208 )
209 return self
210
211
212 class CategoryPage(pages_models.ModelPage):
213 """Create proxy model for Admin."""
214
215 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
216 proxy = True
217
218 # noinspection PyTypeChecker
219 objects = pages_models.ModelPage.create_model_page_managers(Category)
220
221
222 class ProductPage(pages_models.ModelPage):
223 """Create proxy model for Admin."""
224
225 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
226 proxy = True
227
228 # noinspection PyTypeChecker
229 objects = (
230 pages_models.ModelPage
231 .create_model_page_managers(Product)
232 )
233
234
235 class TagGroup(catalog_models.TagGroup):
236 pass
237
238
239 class TagQuerySet(catalog_models.TagQuerySet):
240
241 def products(self):
242 ids = self.values_list('products__id', flat=True)
243 return Product.objects.filter(id__in=ids).distinct()
244
245
246 class TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):
247 pass
248
249
250 class Tag(catalog_models.Tag):
251 group = models.ForeignKey(
252 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
253 )
254
255 objects = TagManager()
256
```
Path: `shopelectro/management/commands/_update_catalog/update_pack.py`
Content:
```
1 """
2 Update Product.in_pack and prices.
3
4 The update_catalog command always resets product prices to per unit format, so:
5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack
6 2. Multiply product prices by in_pack value and save.
7 """
8 import logging
9 import typing
10
11 from django.conf import settings
12 from django.db import models, transaction
13
14 from catalog.models_expressions import Substring
15
16 from shopelectro.models import TagQuerySet, TagGroup
17
18 logger = logging.getLogger(__name__)
19 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
20
21
22 def find_pack_group() -> typing.Optional[TagGroup]:
23 pack_group = TagGroup.objects.filter(uuid=settings.PACK_GROUP_UUID).first()
24
25 # @todo #864:60m Raise errors in find_pack_group.
26 # Remove Optional type as returning value and test find_pack_group.
27 if not pack_group:
28 logger.error(
29 f'Couldn\'t find "{settings.PACK_GROUP_NAME}" tag group by'
30 f'UUID="{settings.PACK_GROUP_UUID}".\n'
31 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID.'
32 )
33 pack_group = None
34 if not settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():
35 logger.error(
36 'The pack group name isn\'t matched with the set name:'
37 f' Pack group name: {pack_group.name}\n'
38 f' Set name: {settings.PACK_GROUP_NAME}\n'
39 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'
40 )
41 pack_group = None
42
43 return pack_group
44
45
46 def update_in_packs(packs: TagQuerySet):
47 """Parse and save in pack quantity values."""
48 packs = (
49 packs
50 .annotate(
51 in_pack_str=Substring(
52 models.F('name'),
53 models.Value('[0-9]+\+?[0-9]*')))
54 .exclude(in_pack_str__exact='')
55 )
56
57 for pack in packs:
58 in_pack = sum(map(int, pack.in_pack_str.split('+')))
59 pack.products.all().update(in_pack=max(in_pack, 1))
60
61
62 def update_prices(packs: TagQuerySet):
63 """Multiply product prices on in pack quantity."""
64 fields_to_update = {}
65 for price in PRICES:
66 fields_to_update[price] = models.F(price) * models.F('in_pack')
67
68 with transaction.atomic():
69 packs.products().update(**fields_to_update)
70
71
72 def main(*args, **kwargs):
73 pack_group = find_pack_group()
74 if not pack_group:
75 return
76
77 return
78
79 packs = pack_group.tags.all().prefetch_related('products')
80 update_in_packs(packs)
81 update_prices(packs)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py
--- a/shopelectro/management/commands/_update_catalog/update_pack.py
+++ b/shopelectro/management/commands/_update_catalog/update_pack.py
@@ -6,39 +6,34 @@
2. Multiply product prices by in_pack value and save.
"""
import logging
-import typing
from django.conf import settings
from django.db import models, transaction
from catalog.models_expressions import Substring
+from shopelectro.exception import UpdateCatalogException
from shopelectro.models import TagQuerySet, TagGroup
logger = logging.getLogger(__name__)
PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
-def find_pack_group() -> typing.Optional[TagGroup]:
- pack_group = TagGroup.objects.filter(uuid=settings.PACK_GROUP_UUID).first()
-
- # @todo #864:60m Raise errors in find_pack_group.
- # Remove Optional type as returning value and test find_pack_group.
- if not pack_group:
- logger.error(
- f'Couldn\'t find "{settings.PACK_GROUP_NAME}" tag group by'
- f'UUID="{settings.PACK_GROUP_UUID}".\n'
- 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID.'
+def find_pack_group() -> TagGroup:
+ try:
+ pack_group = TagGroup.objects.get_pack()
+ except TagGroup.DoesNotExist as error:
+ raise UpdateCatalogException(
+ 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. '
+ + str(error)
)
- pack_group = None
- if not settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():
- logger.error(
+ if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():
+ raise UpdateCatalogException(
'The pack group name isn\'t matched with the set name:'
f' Pack group name: {pack_group.name}\n'
f' Set name: {settings.PACK_GROUP_NAME}\n'
'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'
)
- pack_group = None
return pack_group
@@ -70,12 +65,6 @@
def main(*args, **kwargs):
- pack_group = find_pack_group()
- if not pack_group:
- return
-
- return
-
- packs = pack_group.tags.all().prefetch_related('products')
+ packs = find_pack_group().tags.all().prefetch_related('products')
update_in_packs(packs)
update_prices(packs)
diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -232,8 +232,15 @@
)
+class TagGroupManager(models.Manager):
+
+ def get_pack(self):
+ return self.get_queryset().get(uuid=settings.PACK_GROUP_UUID)
+
+
class TagGroup(catalog_models.TagGroup):
- pass
+
+ objects = TagGroupManager()
class TagQuerySet(catalog_models.TagQuerySet):
@@ -244,7 +251,9 @@
class TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):
- pass
+
+ def get_packs(self):
+ return TagGroup.objects.get_pack().tags.all()
class Tag(catalog_models.Tag):
|
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -6,39 +6,34 @@\n 2. Multiply product prices by in_pack value and save.\n \"\"\"\n import logging\n-import typing\n \n from django.conf import settings\n from django.db import models, transaction\n \n from catalog.models_expressions import Substring\n \n+from shopelectro.exception import UpdateCatalogException\n from shopelectro.models import TagQuerySet, TagGroup\n \n logger = logging.getLogger(__name__)\n PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n \n \n-def find_pack_group() -> typing.Optional[TagGroup]:\n- pack_group = TagGroup.objects.filter(uuid=settings.PACK_GROUP_UUID).first()\n-\n- # @todo #864:60m Raise errors in find_pack_group.\n- # Remove Optional type as returning value and test find_pack_group.\n- if not pack_group:\n- logger.error(\n- f'Couldn\\'t find \"{settings.PACK_GROUP_NAME}\" tag group by'\n- f'UUID=\"{settings.PACK_GROUP_UUID}\".\\n'\n- 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID.'\n+def find_pack_group() -> TagGroup:\n+ try:\n+ pack_group = TagGroup.objects.get_pack()\n+ except TagGroup.DoesNotExist as error:\n+ raise UpdateCatalogException(\n+ 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. '\n+ + str(error)\n )\n- pack_group = None\n- if not settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n- logger.error(\n+ if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n+ raise UpdateCatalogException(\n 'The pack group name isn\\'t matched with the set name:'\n f' Pack group name: {pack_group.name}\\n'\n f' Set name: {settings.PACK_GROUP_NAME}\\n'\n 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'\n )\n- pack_group = None\n \n return pack_group\n \n@@ -70,12 +65,6 @@\n \n \n def main(*args, **kwargs):\n- pack_group = find_pack_group()\n- if not pack_group:\n- return\n-\n- return\n-\n- packs = pack_group.tags.all().prefetch_related('products')\n+ packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\ndiff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -232,8 +232,15 @@\n )\n \n \n+class TagGroupManager(models.Manager):\n+\n+ def get_pack(self):\n+ return self.get_queryset().get(uuid=settings.PACK_GROUP_UUID)\n+\n+\n class TagGroup(catalog_models.TagGroup):\n- pass\n+\n+ objects = TagGroupManager()\n \n \n class TagQuerySet(catalog_models.TagQuerySet):\n@@ -244,7 +251,9 @@\n \n \n class TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):\n- pass\n+\n+ def get_packs(self):\n+ return TagGroup.objects.get_pack().tags.all()\n \n \n class Tag(catalog_models.Tag):\n", "issue": "Test update_pack command\nCreated in #864\n", "before_files": [{"content": "import enum\nimport random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: 
str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(\n catalog_models.AbstractProduct,\n catalog_models.AbstractPosition,\n pages_models.SyncPageMixin\n):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n in_pack = models.PositiveSmallIntegerField(\n default=1,\n verbose_name=_('in pack'),\n )\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).group_tags()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n 
default='', blank=True, verbose_name=_('limitations'))\n\n\nclass ItemsEnum(enum.EnumMeta):\n \"\"\"\n Provide dict-like `items` method.\n\n https://docs.python.org/3/library/enum.html#enum-classes\n \"\"\"\n\n def items(self):\n return [(i.name, i.value) for i in self]\n\n def __repr__(self):\n fields = ', '.join(i.name for i in self)\n return f\"<enum '{self.__name__}: {fields}'>\"\n\n\nclass PaymentOptions(enum.Enum, metaclass=ItemsEnum):\n cash = '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'\n cashless = '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'\n AC = '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'\n PC = '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'\n GP = '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'\n AB = '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'\n\n @staticmethod\n def default():\n return PaymentOptions.cash\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=PaymentOptions.items(),\n default=PaymentOptions.default().name,\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_label(self):\n \"\"\"Return label for an order's payment option.\"\"\"\n return PaymentOptions[self.payment_type].value\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n\n def products(self):\n ids = self.values_list('products__id', flat=True)\n return Product.objects.filter(id__in=ids).distinct()\n\n\nclass TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n objects = TagManager()\n", "path": "shopelectro/models.py"}, {"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. 
Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\nimport typing\n\nfrom django.conf import settings\nfrom django.db import models, transaction\n\nfrom catalog.models_expressions import Substring\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef find_pack_group() -> typing.Optional[TagGroup]:\n pack_group = TagGroup.objects.filter(uuid=settings.PACK_GROUP_UUID).first()\n\n # @todo #864:60m Raise errors in find_pack_group.\n # Remove Optional type as returning value and test find_pack_group.\n if not pack_group:\n logger.error(\n f'Couldn\\'t find \"{settings.PACK_GROUP_NAME}\" tag group by'\n f'UUID=\"{settings.PACK_GROUP_UUID}\".\\n'\n 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID.'\n )\n pack_group = None\n if not settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n logger.error(\n 'The pack group name isn\\'t matched with the set name:'\n f' Pack group name: {pack_group.name}\\n'\n f' Set name: {settings.PACK_GROUP_NAME}\\n'\n 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'\n )\n pack_group = None\n\n return pack_group\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n packs = (\n packs\n .annotate(\n in_pack_str=Substring(\n models.F('name'),\n models.Value('[0-9]+\\+?[0-9]*')))\n .exclude(in_pack_str__exact='')\n )\n\n for pack in packs:\n in_pack = sum(map(int, pack.in_pack_str.split('+')))\n pack.products.all().update(in_pack=max(in_pack, 1))\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n pack_group = find_pack_group()\n if not pack_group:\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}], "after_files": [{"content": "import enum\nimport random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return 
pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(\n catalog_models.AbstractProduct,\n catalog_models.AbstractPosition,\n pages_models.SyncPageMixin\n):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n in_pack = models.PositiveSmallIntegerField(\n default=1,\n verbose_name=_('in pack'),\n )\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).group_tags()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\nclass ItemsEnum(enum.EnumMeta):\n \"\"\"\n Provide dict-like `items` method.\n\n https://docs.python.org/3/library/enum.html#enum-classes\n \"\"\"\n\n def items(self):\n return [(i.name, i.value) for i in self]\n\n def __repr__(self):\n fields = ', '.join(i.name for i in self)\n return f\"<enum '{self.__name__}: {fields}'>\"\n\n\nclass PaymentOptions(enum.Enum, metaclass=ItemsEnum):\n cash = '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'\n cashless = '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'\n AC = '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f 
\u043a\u0430\u0440\u0442\u0430'\n PC = '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'\n GP = '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'\n AB = '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'\n\n @staticmethod\n def default():\n return PaymentOptions.cash\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=PaymentOptions.items(),\n default=PaymentOptions.default().name,\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_label(self):\n \"\"\"Return label for an order's payment option.\"\"\"\n return PaymentOptions[self.payment_type].value\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroupManager(models.Manager):\n\n def get_pack(self):\n return self.get_queryset().get(uuid=settings.PACK_GROUP_UUID)\n\n\nclass TagGroup(catalog_models.TagGroup):\n\n objects = TagGroupManager()\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n\n def products(self):\n ids = self.values_list('products__id', flat=True)\n return Product.objects.filter(id__in=ids).distinct()\n\n\nclass TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):\n\n def get_packs(self):\n return TagGroup.objects.get_pack().tags.all()\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n objects = TagManager()\n", "path": "shopelectro/models.py"}, {"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. 
Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db import models, transaction\n\nfrom catalog.models_expressions import Substring\n\nfrom shopelectro.exception import UpdateCatalogException\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef find_pack_group() -> TagGroup:\n try:\n pack_group = TagGroup.objects.get_pack()\n except TagGroup.DoesNotExist as error:\n raise UpdateCatalogException(\n 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. '\n + str(error)\n )\n if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n raise UpdateCatalogException(\n 'The pack group name isn\\'t matched with the set name:'\n f' Pack group name: {pack_group.name}\\n'\n f' Set name: {settings.PACK_GROUP_NAME}\\n'\n 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'\n )\n\n return pack_group\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n packs = (\n packs\n .annotate(\n in_pack_str=Substring(\n models.F('name'),\n models.Value('[0-9]+\\+?[0-9]*')))\n .exclude(in_pack_str__exact='')\n )\n\n for pack in packs:\n in_pack = sum(map(int, pack.in_pack_str.split('+')))\n pack.products.all().update(in_pack=max(in_pack, 1))\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]}
| 3,368 | 787 |
gh_patches_debug_38334 | rasdani/github-patches | git_diff | joke2k__faker-1461 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pydecimal left_digits ignored if min_value or max_value is specified
`max_value` should be set to `10 ** (left_digits -1) - epsilon` before this:
https://github.com/joke2k/faker/blob/d9f4b00b9134e6dfbb09cc1caa81c912b79c3c7c/faker/providers/python/__init__.py#L92-L102
Use cases for using both include:
- `min_value=0`, since `positive=True` disallows `0` (a bug in itself IMO, but that's an age old debate!)
- `min_value` at all actually? As in, 4 left digits, but no less than 42 in value, for example.
- a `max_value` that has a semantically different reason for existing, so it's convenient to specify in addition to `left_digits` [^]
Work around is to specify a `max_value` (per above) instead of `left_digits` if `min_value` or `max_value` are needed too.
(I will have a PR for this shortly.)
[^] - e.g. `left_digits` could be a database requirement (`NUMERIC(left + right, right)`), but `max_value` something to do with the logic the fake is for.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/python/__init__.py`
Content:
```
1 import string
2 import sys
3 import warnings
4
5 from decimal import Decimal
6
7 from .. import BaseProvider
8
9
10 class Provider(BaseProvider):
11 default_value_types = (
12 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',
13 'date_time', 'uri', 'email',
14 )
15
16 def _check_signature(self, value_types, allowed_types):
17 if value_types is not None and not isinstance(value_types, (list, tuple)):
18 value_types = [value_types]
19 warnings.warn(
20 'Passing value types as positional arguments is going to be '
21 'deprecated. Pass them as a list or tuple instead.',
22 PendingDeprecationWarning,
23 )
24 if value_types is None:
25 value_types = ()
26 return tuple(value_types) + allowed_types
27
28 def pybool(self):
29 return self.random_int(0, 1) == 1
30
31 def pystr(self, min_chars=None, max_chars=20):
32 """
33 Generates a random string of upper and lowercase letters.
34 :type min_chars: int
35 :type max_chars: int
36 :return: String. Random of random length between min and max characters.
37 """
38 if min_chars is None:
39 return "".join(self.random_letters(length=max_chars))
40 else:
41 assert (
42 max_chars >= min_chars), "Maximum length must be greater than or equal to minimum length"
43 return "".join(
44 self.random_letters(
45 length=self.generator.random.randint(min_chars, max_chars),
46 ),
47 )
48
49 def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):
50 return self.bothify(self.generator.parse(string_format), letters=letters)
51
52 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
53 min_value=None, max_value=None):
54 if left_digits is not None and left_digits < 0:
55 raise ValueError(
56 'A float number cannot have less than 0 digits in its '
57 'integer part')
58 if right_digits is not None and right_digits < 0:
59 raise ValueError(
60 'A float number cannot have less than 0 digits in its '
61 'fractional part')
62 if left_digits == 0 and right_digits == 0:
63 raise ValueError(
64 'A float number cannot have less than 0 digits in total')
65 if None not in (min_value, max_value) and min_value > max_value:
66 raise ValueError('Min value cannot be greater than max value')
67 if None not in (min_value, max_value) and min_value == max_value:
68 raise ValueError('Min and max value cannot be the same')
69 if positive and min_value is not None and min_value <= 0:
70 raise ValueError(
71 'Cannot combine positive=True with negative or zero min_value')
72
73 # Make sure at least either left or right is set
74 if left_digits is None and right_digits is None:
75 left_digits = self.random_int(1, sys.float_info.dig - 1)
76
77 # If only one side is set, choose #digits for other side
78 if (left_digits is None) ^ (right_digits is None):
79 if left_digits is None:
80 left_digits = max(1, sys.float_info.dig - right_digits)
81 else:
82 right_digits = max(1, sys.float_info.dig - left_digits)
83
84 # Make sure we don't ask for too many digits!
85 if left_digits + right_digits > sys.float_info.dig:
86 raise ValueError(
87 f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '
88 f'{sys.float_info.dig})',
89 )
90
91 sign = ''
92 if (min_value is not None) or (max_value is not None):
93 if max_value is not None and max_value < 0:
94 max_value += 1 # as the random_int will be generated up to max_value - 1
95 if min_value is not None and min_value < 0:
96 min_value += 1 # as we then append digits after the left_number
97 left_number = self._safe_random_int(
98 min_value, max_value, positive,
99 )
100 else:
101 sign = '+' if positive else self.random_element(('+', '-'))
102 left_number = self.random_number(left_digits)
103
104 result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')
105 if positive and result == 0:
106 if right_digits:
107 result = float('0.' + '0' * (right_digits - 1) + '1')
108 else:
109 result += sys.float_info.epsilon
110 return result
111
112 def _safe_random_int(self, min_value, max_value, positive):
113 orig_min_value = min_value
114 orig_max_value = max_value
115
116 if min_value is None:
117 min_value = max_value - self.random_int()
118 if max_value is None:
119 max_value = min_value + self.random_int()
120 if positive:
121 min_value = max(min_value, 0)
122
123 if min_value == max_value:
124 return self._safe_random_int(orig_min_value, orig_max_value, positive)
125 else:
126 return self.random_int(min_value, max_value - 1)
127
128 def pyint(self, min_value=0, max_value=9999, step=1):
129 return self.generator.random_int(min_value, max_value, step=step)
130
131 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
132 min_value=None, max_value=None):
133
134 float_ = self.pyfloat(
135 left_digits, right_digits, positive, min_value, max_value)
136 return Decimal(str(float_))
137
138 def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
139 return tuple(
140 self._pyiterable(
141 nb_elements,
142 variable_nb_elements,
143 value_types,
144 *allowed_types))
145
146 def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
147 return set(
148 self._pyiterable(
149 nb_elements,
150 variable_nb_elements,
151 value_types,
152 *allowed_types))
153
154 def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
155 return list(
156 self._pyiterable(
157 nb_elements,
158 variable_nb_elements,
159 value_types,
160 *allowed_types))
161
162 def pyiterable(
163 self,
164 nb_elements=10,
165 variable_nb_elements=True,
166 value_types=None,
167 *allowed_types):
168 value_types = self._check_signature(value_types, allowed_types)
169 return self.random_element([self.pylist, self.pytuple, self.pyset])(
170 nb_elements, variable_nb_elements, value_types, *allowed_types)
171
172 def _random_type(self, type_list):
173 value_type = self.random_element(type_list)
174
175 method_name = f'py{value_type}'
176 if hasattr(self, method_name):
177 value_type = method_name
178
179 return self.generator.format(value_type)
180
181 def _pyiterable(
182 self,
183 nb_elements=10,
184 variable_nb_elements=True,
185 value_types=None,
186 *allowed_types):
187
188 value_types = self._check_signature(value_types, allowed_types)
189
190 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
191 for t in value_types
192 # avoid recursion
193 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
194 if not value_types:
195 value_types = self.default_value_types
196
197 if variable_nb_elements:
198 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
199
200 for _ in range(nb_elements):
201 yield self._random_type(value_types)
202
203 def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
204 """
205 Returns a dictionary.
206
207 :nb_elements: number of elements for dictionary
208 :variable_nb_elements: is use variable number of elements for dictionary
209 :value_types: type of dictionary values
210 """
211 if variable_nb_elements:
212 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
213
214 return dict(zip(
215 self.generator.words(nb_elements, unique=True),
216 self._pyiterable(nb_elements, False, value_types, *allowed_types),
217 ))
218
219 def pystruct(self, count=10, value_types=None, *allowed_types):
220 value_types = self._check_signature(value_types, allowed_types)
221
222 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
223 for t in value_types
224 # avoid recursion
225 if t != 'struct']
226 if not value_types:
227 value_types = self.default_value_types
228
229 types = []
230 d = {}
231 nd = {}
232 for i in range(count):
233 d[self.generator.word()] = self._random_type(value_types)
234 types.append(self._random_type(value_types))
235 nd[self.generator.word()] = {i: self._random_type(value_types),
236 i + 1: [self._random_type(value_types),
237 self._random_type(value_types),
238 self._random_type(value_types)],
239 i + 2: {i: self._random_type(value_types),
240 i + 1: self._random_type(value_types),
241 i + 2: [self._random_type(value_types),
242 self._random_type(value_types)]}}
243 return types, d, nd
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -1,3 +1,4 @@
+import math
import string
import sys
import warnings
@@ -69,10 +70,15 @@
if positive and min_value is not None and min_value <= 0:
raise ValueError(
'Cannot combine positive=True with negative or zero min_value')
+ if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:
+ raise ValueError('Max value must fit within left digits')
+ if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:
+ raise ValueError('Min value must fit within left digits')
# Make sure at least either left or right is set
if left_digits is None and right_digits is None:
- left_digits = self.random_int(1, sys.float_info.dig - 1)
+ needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))
+ right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)
# If only one side is set, choose #digits for other side
if (left_digits is None) ^ (right_digits is None):
@@ -90,6 +96,13 @@
sign = ''
if (min_value is not None) or (max_value is not None):
+ # Make sure left_digits still respected
+ if left_digits is not None:
+ if max_value is None:
+ max_value = 10 ** left_digits # minus smallest representable, adjusted later
+ if min_value is None:
+ min_value = -(10 ** left_digits) # plus smallest representable, adjusted later
+
if max_value is not None and max_value < 0:
max_value += 1 # as the random_int will be generated up to max_value - 1
if min_value is not None and min_value < 0:
@@ -107,6 +120,14 @@
result = float('0.' + '0' * (right_digits - 1) + '1')
else:
result += sys.float_info.epsilon
+
+ if right_digits:
+ result = min(result, 10 ** left_digits - float(f'0.{"0" * (right_digits - 1)}1'))
+ result = max(result, -(10 ** left_digits + float(f'0.{"0" * (right_digits - 1)}1')))
+ else:
+ result = min(result, 10 ** left_digits - 1)
+ result = max(result, -(10 ** left_digits + 1))
+
return result
def _safe_random_int(self, min_value, max_value, positive):
|
{"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -1,3 +1,4 @@\n+import math\n import string\n import sys\n import warnings\n@@ -69,10 +70,15 @@\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n+ if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n+ raise ValueError('Max value must fit within left digits')\n+ if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n+ raise ValueError('Min value must fit within left digits')\n \n # Make sure at least either left or right is set\n if left_digits is None and right_digits is None:\n- left_digits = self.random_int(1, sys.float_info.dig - 1)\n+ needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))\n+ right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)\n \n # If only one side is set, choose #digits for other side\n if (left_digits is None) ^ (right_digits is None):\n@@ -90,6 +96,13 @@\n \n sign = ''\n if (min_value is not None) or (max_value is not None):\n+ # Make sure left_digits still respected\n+ if left_digits is not None:\n+ if max_value is None:\n+ max_value = 10 ** left_digits # minus smallest representable, adjusted later\n+ if min_value is None:\n+ min_value = -(10 ** left_digits) # plus smallest representable, adjusted later\n+\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n@@ -107,6 +120,14 @@\n result = float('0.' + '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n+\n+ if right_digits:\n+ result = min(result, 10 ** left_digits - float(f'0.{\"0\" * (right_digits - 1)}1'))\n+ result = max(result, -(10 ** left_digits + float(f'0.{\"0\" * (right_digits - 1)}1')))\n+ else:\n+ result = min(result, 10 ** left_digits - 1)\n+ result = max(result, -(10 ** left_digits + 1))\n+\n return result\n \n def _safe_random_int(self, min_value, max_value, positive):\n", "issue": "pydecimal left_digits ignored if min_value or max_value is specified\n`max_value` should be set to `10 ** (left_digits -1) - epsilon` before this:\r\n\r\nhttps://github.com/joke2k/faker/blob/d9f4b00b9134e6dfbb09cc1caa81c912b79c3c7c/faker/providers/python/__init__.py#L92-L102\r\n\r\nUse cases for using both include:\r\n\r\n- `min_value=0`, since `positive=True` disallows `0` (a bug in itself IMO, but that's an age old debate!)\r\n- `min_value` at all actually? As in, 4 left digits, but no less than 42 in value, for example.\r\n- a `max_value` that has a semantically different reason for existing, so it's convenient to specify in addition to `left_digits` [^]\r\n\r\nWork around is to specify a `max_value` (per above) instead of `left_digits` if `min_value` or `max_value` are needed too.\r\n\r\n(I will have a PR for this shortly.)\r\n\r\n[^] - e.g. `left_digits` could be a database requirement (`NUMERIC(left + right, right)`), but `max_value` something to do with the logic the fake is for.\n", "before_files": [{"content": "import string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. 
import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n\n # Make sure at least either left or right is set\n if left_digits is None and right_digits is None:\n left_digits = self.random_int(1, sys.float_info.dig - 1)\n\n # If only one side is set, choose #digits for other side\n if (left_digits is None) ^ (right_digits is None):\n if left_digits is None:\n left_digits = max(1, sys.float_info.dig - right_digits)\n else:\n right_digits = max(1, sys.float_info.dig - left_digits)\n\n # Make sure we don't ask for too many digits!\n if left_digits + right_digits > sys.float_info.dig:\n raise ValueError(\n f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '\n f'{sys.float_info.dig})',\n )\n\n sign = ''\n if (min_value is not None) or (max_value is not None):\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = 
float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' + '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) 
else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}], "after_files": [{"content": "import math\nimport string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n raise ValueError('Max value must fit within left digits')\n if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n raise ValueError('Min value must fit within left 
digits')\n\n # Make sure at least either left or right is set\n if left_digits is None and right_digits is None:\n needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))\n right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)\n\n # If only one side is set, choose #digits for other side\n if (left_digits is None) ^ (right_digits is None):\n if left_digits is None:\n left_digits = max(1, sys.float_info.dig - right_digits)\n else:\n right_digits = max(1, sys.float_info.dig - left_digits)\n\n # Make sure we don't ask for too many digits!\n if left_digits + right_digits > sys.float_info.dig:\n raise ValueError(\n f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '\n f'{sys.float_info.dig})',\n )\n\n sign = ''\n if (min_value is not None) or (max_value is not None):\n # Make sure left_digits still respected\n if left_digits is not None:\n if max_value is None:\n max_value = 10 ** left_digits # minus smallest representable, adjusted later\n if min_value is None:\n min_value = -(10 ** left_digits) # plus smallest representable, adjusted later\n\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' + '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n\n if right_digits:\n result = min(result, 10 ** left_digits - float(f'0.{\"0\" * (right_digits - 1)}1'))\n result = max(result, -(10 ** left_digits + float(f'0.{\"0\" * (right_digits - 1)}1')))\n else:\n result = min(result, 10 ** left_digits - 1)\n result = max(result, -(10 ** left_digits + 1))\n\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n 
value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]}
| 3,257 | 666 |
gh_patches_debug_21635 | rasdani/github-patches | git_diff | google__osv.dev-1082 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't suggest using -X POST
On https://osv.dev/#use-the-api is the instruction
```
Query by commit hash
curl -X POST -d \
'{"commit": "6879efc2c1596d11a6a6ad296f80063b558d5e0f"}' \
"https://api.osv.dev/v1/query"
```
Using `-X POST` here is unnecessary, redundant and potentially dangerous as people cut and paste this into more places. curl will actually tell you this if you add `-v` to this command:
`Note: Unnecessary use of -X or --request, POST is already inferred.`
See also https://daniel.haxx.se/blog/2015/09/11/unnecessary-use-of-curl-x/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/build.py`
Content:
```
1 # Copyright 2021 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Documentation builder."""
15
16 import json
17 import os
18 import shutil
19 import subprocess
20
21 _ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
22 _GENERATED_FILENAME = 'v1/osv_service_v1.swagger.json'
23
24
25 def property_description_workaround(definition):
26 """Work around an OpenAPI limitation with a field descriptions getting
27 replaced by the object descriptions."""
28 # Workaround described in https://github.com/Redocly/redoc/issues/835.
29 for value in definition['properties'].values():
30 if '$ref' in value:
31 value['allOf'] = [{'$ref': value['$ref']}]
32 del value['$ref']
33
34
35 def replace_property_name(definition, key, replacement):
36 """Replace property name."""
37 definition['properties'][replacement] = definition['properties'][key]
38 del definition['properties'][key]
39
40
41 def main():
42 api_dir = os.path.join(_ROOT_DIR, 'gcp', 'api')
43 v1_api_dir = os.path.join(api_dir, 'v1')
44 googleapis_dir = os.path.join(api_dir, 'googleapis')
45 service_proto_path = os.path.join(v1_api_dir, 'osv_service_v1.proto')
46
47 # Add OSV dependencies.
48 osv_path = os.path.join(api_dir, 'osv')
49 if os.path.exists(osv_path):
50 shutil.rmtree(osv_path)
51
52 shutil.copytree(os.path.join(_ROOT_DIR, 'osv'), osv_path)
53
54 subprocess.run([
55 'protoc',
56 '-I',
57 api_dir,
58 '-I',
59 v1_api_dir,
60 '-I',
61 googleapis_dir,
62 '--openapiv2_out',
63 '.',
64 '--openapiv2_opt',
65 'logtostderr=true',
66 service_proto_path,
67 ],
68 check=True)
69
70 with open(_GENERATED_FILENAME) as f:
71 spec = json.load(f)
72
73 spec['host'] = 'api.osv.dev'
74 spec['info']['title'] = 'OSV'
75 spec['info']['version'] = '1.0'
76 spec['tags'] = [{
77 'name': 'api',
78 'x-displayName': 'API',
79 'description': 'The API has 3 methods:'
80 }, {
81 'name': 'vulnerability_schema',
82 'x-displayName': 'Vulnerability schema',
83 'description': 'Please see the [OpenSSF Open Source Vulnerability spec]'
84 '(https://ossf.github.io/osv-schema/).',
85 }]
86
87 spec['x-tagGroups'] = [{
88 'name': 'API',
89 'tags': ['api']
90 }, {
91 'name': 'Schema',
92 'tags': ['vulnerability_schema']
93 }]
94
95 spec['paths']['/v1/query']['post']['tags'] = ['api']
96 spec['paths']['/v1/querybatch']['post']['tags'] = ['api']
97 spec['paths']['/v1/vulns/{id}']['get']['tags'] = ['api']
98
99 spec['paths']['/v1/query']['post']['x-code-samples'] = [{
100 'lang':
101 'Curl example',
102 'source':
103 ('curl -X POST -d \\\n'
104 ' \'{"commit": "6879efc2c1596d11a6a6ad296f80063b558d5e0f"}\' \\\n'
105 ' "https://api.osv.dev/v1/query"\n\n'
106 'curl -X POST -d \\\n'
107 ' \'{"package": {"name": "mruby"}, "version": "2.1.2rc"}\' \\\n'
108 ' "https://api.osv.dev/v1/query"')
109 }]
110
111 spec['paths']['/v1/querybatch']['post']['x-code-samples'] = [{
112 'lang':
113 'Curl example',
114 'source':
115 ("""cat <<EOF | curl -X POST -d @- "https://api.osv.dev/v1/querybatch"
116 {
117 "queries": [
118 {
119 "package": {
120 "purl": "pkg:pypi/[email protected]"
121 }
122 },
123 {
124 "commit": "6879efc2c1596d11a6a6ad296f80063b558d5e0f"
125 },
126 {
127 "package": {
128 "ecosystem": "PyPI",
129 "name": "jinja2"
130 },
131 "version": "2.4.1"
132 }
133 ]
134 }
135 EOF""")
136 }]
137
138 spec['paths']['/v1/vulns/{id}']['get']['x-code-samples'] = [{
139 'lang': 'Curl example',
140 'source': 'curl "https://api.osv.dev/v1/vulns/OSV-2020-111"'
141 }]
142
143 property_description_workaround(spec['definitions']['v1Query'])
144 property_description_workaround(spec['definitions']['osvVulnerability'])
145
146 replace_property_name(spec['definitions']['osvVulnerability'],
147 'databaseSpecific', 'database_specific')
148
149 with open('sections.md') as f:
150 spec['info']['description'] = f.read()
151
152 with open(_GENERATED_FILENAME, 'w') as f:
153 f.write(json.dumps(spec, indent=2))
154
155 shutil.move(_GENERATED_FILENAME, os.path.basename(_GENERATED_FILENAME))
156
157
158 if __name__ == '__main__':
159 main()
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/build.py b/docs/build.py
--- a/docs/build.py
+++ b/docs/build.py
@@ -100,10 +100,10 @@
'lang':
'Curl example',
'source':
- ('curl -X POST -d \\\n'
+ ('curl -d \\\n'
' \'{"commit": "6879efc2c1596d11a6a6ad296f80063b558d5e0f"}\' \\\n'
' "https://api.osv.dev/v1/query"\n\n'
- 'curl -X POST -d \\\n'
+ 'curl -d \\\n'
' \'{"package": {"name": "mruby"}, "version": "2.1.2rc"}\' \\\n'
' "https://api.osv.dev/v1/query"')
}]
@@ -111,8 +111,7 @@
spec['paths']['/v1/querybatch']['post']['x-code-samples'] = [{
'lang':
'Curl example',
- 'source':
- ("""cat <<EOF | curl -X POST -d @- "https://api.osv.dev/v1/querybatch"
+ 'source': ("""cat <<EOF | curl -d @- "https://api.osv.dev/v1/querybatch"
{
"queries": [
{
|
{"golden_diff": "diff --git a/docs/build.py b/docs/build.py\n--- a/docs/build.py\n+++ b/docs/build.py\n@@ -100,10 +100,10 @@\n 'lang':\n 'Curl example',\n 'source':\n- ('curl -X POST -d \\\\\\n'\n+ ('curl -d \\\\\\n'\n ' \\'{\"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"\\n\\n'\n- 'curl -X POST -d \\\\\\n'\n+ 'curl -d \\\\\\n'\n ' \\'{\"package\": {\"name\": \"mruby\"}, \"version\": \"2.1.2rc\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"')\n }]\n@@ -111,8 +111,7 @@\n spec['paths']['/v1/querybatch']['post']['x-code-samples'] = [{\n 'lang':\n 'Curl example',\n- 'source':\n- (\"\"\"cat <<EOF | curl -X POST -d @- \"https://api.osv.dev/v1/querybatch\"\n+ 'source': (\"\"\"cat <<EOF | curl -d @- \"https://api.osv.dev/v1/querybatch\"\n {\n \"queries\": [\n {\n", "issue": "Don't suggest using -X POST\nOn https://osv.dev/#use-the-api is the instruction\r\n```\r\nQuery by commit hash\r\n\r\ncurl -X POST -d \\\r\n '{\"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"}' \\\r\n \"https://api.osv.dev/v1/query\"\r\n```\r\n\r\nUsing `-X POST` here is unnecessary, redundant and potentially dangerous as people cut and paste this into more places. curl will actually tell you this if you add `-v` to this command:\r\n\r\n`Note: Unnecessary use of -X or --request, POST is already inferred.`\r\n\r\nSee also https://daniel.haxx.se/blog/2015/09/11/unnecessary-use-of-curl-x/\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Documentation builder.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport subprocess\n\n_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n_GENERATED_FILENAME = 'v1/osv_service_v1.swagger.json'\n\n\ndef property_description_workaround(definition):\n \"\"\"Work around an OpenAPI limitation with a field descriptions getting\n replaced by the object descriptions.\"\"\"\n # Workaround described in https://github.com/Redocly/redoc/issues/835.\n for value in definition['properties'].values():\n if '$ref' in value:\n value['allOf'] = [{'$ref': value['$ref']}]\n del value['$ref']\n\n\ndef replace_property_name(definition, key, replacement):\n \"\"\"Replace property name.\"\"\"\n definition['properties'][replacement] = definition['properties'][key]\n del definition['properties'][key]\n\n\ndef main():\n api_dir = os.path.join(_ROOT_DIR, 'gcp', 'api')\n v1_api_dir = os.path.join(api_dir, 'v1')\n googleapis_dir = os.path.join(api_dir, 'googleapis')\n service_proto_path = os.path.join(v1_api_dir, 'osv_service_v1.proto')\n\n # Add OSV dependencies.\n osv_path = os.path.join(api_dir, 'osv')\n if os.path.exists(osv_path):\n shutil.rmtree(osv_path)\n\n shutil.copytree(os.path.join(_ROOT_DIR, 'osv'), osv_path)\n\n subprocess.run([\n 'protoc',\n '-I',\n api_dir,\n '-I',\n v1_api_dir,\n '-I',\n googleapis_dir,\n '--openapiv2_out',\n '.',\n '--openapiv2_opt',\n 'logtostderr=true',\n service_proto_path,\n ],\n check=True)\n\n with open(_GENERATED_FILENAME) 
as f:\n spec = json.load(f)\n\n spec['host'] = 'api.osv.dev'\n spec['info']['title'] = 'OSV'\n spec['info']['version'] = '1.0'\n spec['tags'] = [{\n 'name': 'api',\n 'x-displayName': 'API',\n 'description': 'The API has 3 methods:'\n }, {\n 'name': 'vulnerability_schema',\n 'x-displayName': 'Vulnerability schema',\n 'description': 'Please see the [OpenSSF Open Source Vulnerability spec]'\n '(https://ossf.github.io/osv-schema/).',\n }]\n\n spec['x-tagGroups'] = [{\n 'name': 'API',\n 'tags': ['api']\n }, {\n 'name': 'Schema',\n 'tags': ['vulnerability_schema']\n }]\n\n spec['paths']['/v1/query']['post']['tags'] = ['api']\n spec['paths']['/v1/querybatch']['post']['tags'] = ['api']\n spec['paths']['/v1/vulns/{id}']['get']['tags'] = ['api']\n\n spec['paths']['/v1/query']['post']['x-code-samples'] = [{\n 'lang':\n 'Curl example',\n 'source':\n ('curl -X POST -d \\\\\\n'\n ' \\'{\"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"\\n\\n'\n 'curl -X POST -d \\\\\\n'\n ' \\'{\"package\": {\"name\": \"mruby\"}, \"version\": \"2.1.2rc\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"')\n }]\n\n spec['paths']['/v1/querybatch']['post']['x-code-samples'] = [{\n 'lang':\n 'Curl example',\n 'source':\n (\"\"\"cat <<EOF | curl -X POST -d @- \"https://api.osv.dev/v1/querybatch\"\n{\n \"queries\": [\n {\n \"package\": {\n \"purl\": \"pkg:pypi/[email protected]\"\n }\n },\n {\n \"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"\n },\n {\n \"package\": {\n \"ecosystem\": \"PyPI\",\n \"name\": \"jinja2\"\n },\n \"version\": \"2.4.1\"\n }\n ]\n}\nEOF\"\"\")\n }]\n\n spec['paths']['/v1/vulns/{id}']['get']['x-code-samples'] = [{\n 'lang': 'Curl example',\n 'source': 'curl \"https://api.osv.dev/v1/vulns/OSV-2020-111\"'\n }]\n\n property_description_workaround(spec['definitions']['v1Query'])\n property_description_workaround(spec['definitions']['osvVulnerability'])\n\n replace_property_name(spec['definitions']['osvVulnerability'],\n 'databaseSpecific', 'database_specific')\n\n with open('sections.md') as f:\n spec['info']['description'] = f.read()\n\n with open(_GENERATED_FILENAME, 'w') as f:\n f.write(json.dumps(spec, indent=2))\n\n shutil.move(_GENERATED_FILENAME, os.path.basename(_GENERATED_FILENAME))\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/build.py"}], "after_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Documentation builder.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport subprocess\n\n_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n_GENERATED_FILENAME = 'v1/osv_service_v1.swagger.json'\n\n\ndef property_description_workaround(definition):\n \"\"\"Work around an OpenAPI limitation with a field descriptions getting\n replaced by the object descriptions.\"\"\"\n # Workaround described in https://github.com/Redocly/redoc/issues/835.\n for value in definition['properties'].values():\n if '$ref' in value:\n value['allOf'] = [{'$ref': 
value['$ref']}]\n del value['$ref']\n\n\ndef replace_property_name(definition, key, replacement):\n \"\"\"Replace property name.\"\"\"\n definition['properties'][replacement] = definition['properties'][key]\n del definition['properties'][key]\n\n\ndef main():\n api_dir = os.path.join(_ROOT_DIR, 'gcp', 'api')\n v1_api_dir = os.path.join(api_dir, 'v1')\n googleapis_dir = os.path.join(api_dir, 'googleapis')\n service_proto_path = os.path.join(v1_api_dir, 'osv_service_v1.proto')\n\n # Add OSV dependencies.\n osv_path = os.path.join(api_dir, 'osv')\n if os.path.exists(osv_path):\n shutil.rmtree(osv_path)\n\n shutil.copytree(os.path.join(_ROOT_DIR, 'osv'), osv_path)\n\n subprocess.run([\n 'protoc',\n '-I',\n api_dir,\n '-I',\n v1_api_dir,\n '-I',\n googleapis_dir,\n '--openapiv2_out',\n '.',\n '--openapiv2_opt',\n 'logtostderr=true',\n service_proto_path,\n ],\n check=True)\n\n with open(_GENERATED_FILENAME) as f:\n spec = json.load(f)\n\n spec['host'] = 'api.osv.dev'\n spec['info']['title'] = 'OSV'\n spec['info']['version'] = '1.0'\n spec['tags'] = [{\n 'name': 'api',\n 'x-displayName': 'API',\n 'description': 'The API has 3 methods:'\n }, {\n 'name': 'vulnerability_schema',\n 'x-displayName': 'Vulnerability schema',\n 'description': 'Please see the [OpenSSF Open Source Vulnerability spec]'\n '(https://ossf.github.io/osv-schema/).',\n }]\n\n spec['x-tagGroups'] = [{\n 'name': 'API',\n 'tags': ['api']\n }, {\n 'name': 'Schema',\n 'tags': ['vulnerability_schema']\n }]\n\n spec['paths']['/v1/query']['post']['tags'] = ['api']\n spec['paths']['/v1/querybatch']['post']['tags'] = ['api']\n spec['paths']['/v1/vulns/{id}']['get']['tags'] = ['api']\n\n spec['paths']['/v1/query']['post']['x-code-samples'] = [{\n 'lang':\n 'Curl example',\n 'source':\n ('curl -d \\\\\\n'\n ' \\'{\"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"\\n\\n'\n 'curl -d \\\\\\n'\n ' \\'{\"package\": {\"name\": \"mruby\"}, \"version\": \"2.1.2rc\"}\\' \\\\\\n'\n ' \"https://api.osv.dev/v1/query\"')\n }]\n\n spec['paths']['/v1/querybatch']['post']['x-code-samples'] = [{\n 'lang':\n 'Curl example',\n 'source': (\"\"\"cat <<EOF | curl -d @- \"https://api.osv.dev/v1/querybatch\"\n{\n \"queries\": [\n {\n \"package\": {\n \"purl\": \"pkg:pypi/[email protected]\"\n }\n },\n {\n \"commit\": \"6879efc2c1596d11a6a6ad296f80063b558d5e0f\"\n },\n {\n \"package\": {\n \"ecosystem\": \"PyPI\",\n \"name\": \"jinja2\"\n },\n \"version\": \"2.4.1\"\n }\n ]\n}\nEOF\"\"\")\n }]\n\n spec['paths']['/v1/vulns/{id}']['get']['x-code-samples'] = [{\n 'lang': 'Curl example',\n 'source': 'curl \"https://api.osv.dev/v1/vulns/OSV-2020-111\"'\n }]\n\n property_description_workaround(spec['definitions']['v1Query'])\n property_description_workaround(spec['definitions']['osvVulnerability'])\n\n replace_property_name(spec['definitions']['osvVulnerability'],\n 'databaseSpecific', 'database_specific')\n\n with open('sections.md') as f:\n spec['info']['description'] = f.read()\n\n with open(_GENERATED_FILENAME, 'w') as f:\n f.write(json.dumps(spec, indent=2))\n\n shutil.move(_GENERATED_FILENAME, os.path.basename(_GENERATED_FILENAME))\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/build.py"}]}
| 2,166 | 329 |
gh_patches_debug_16275 | rasdani/github-patches | git_diff | Kinto__kinto-1256 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accounts post API crashes with unset id (if basicauth is enabled too)
```
gsurita-30820:~ gsurita$ echo '{"data": {"password": "me"}}' | http post localhost:8888/v1/accounts -a foo:bar
HTTP/1.1 500 Internal Server Error
(...)
```
```
Traceback (most recent call last):
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/tweens.py", line 22, in excview_tween
response = handler(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py", line 119, in tm_tween
reraise(*exc_info)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/compat.py", line 15, in reraise
raise value
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py", line 98, in tm_tween
response = handler(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/router.py", line 155, in handle_request
view_name
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/view.py", line 612, in _call_view
response = view_callable(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/config/views.py", line 181, in __call__
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 389, in attr_view
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 367, in predicate_wrapper
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 300, in secured_view
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 438, in rendered_view
result = view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/cornice/service.py", line 491, in wrapper
response = view_()
File "/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py", line 81, in collection_post
result = super(Account, self).collection_post()
File "/Users/gsurita/kinto/kinto/kinto/core/resource/__init__.py", line 341, in collection_post
new_record = self.process_record(new_record)
File "/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py", line 102, in process_record
if new[self.model.id_field] != self.request.selected_userid:
KeyError: 'id'
```
Accounts post API crashes with unset id (if basicauth is enabled too)
```
gsurita-30820:~ gsurita$ echo '{"data": {"password": "me"}}' | http post localhost:8888/v1/accounts -a foo:bar
HTTP/1.1 500 Internal Server Error
(...)
```
```
Traceback (most recent call last):
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/tweens.py", line 22, in excview_tween
response = handler(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py", line 119, in tm_tween
reraise(*exc_info)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/compat.py", line 15, in reraise
raise value
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py", line 98, in tm_tween
response = handler(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/router.py", line 155, in handle_request
view_name
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/view.py", line 612, in _call_view
response = view_callable(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/config/views.py", line 181, in __call__
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 389, in attr_view
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 367, in predicate_wrapper
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 300, in secured_view
return view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 438, in rendered_view
result = view(context, request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/cornice/service.py", line 491, in wrapper
response = view_()
File "/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py", line 81, in collection_post
result = super(Account, self).collection_post()
File "/Users/gsurita/kinto/kinto/kinto/core/resource/__init__.py", line 341, in collection_post
new_record = self.process_record(new_record)
File "/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py", line 102, in process_record
if new[self.model.id_field] != self.request.selected_userid:
KeyError: 'id'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/accounts/__init__.py`
Content:
```
1 from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
2
3
4 def includeme(config):
5 config.add_api_capability(
6 'accounts',
7 description='Manage user accounts.',
8 url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')
9
10 config.scan('kinto.plugins.accounts.views')
11
12 PERMISSIONS_INHERITANCE_TREE[''].update({
13 'account:create': {}
14 })
15 PERMISSIONS_INHERITANCE_TREE['account'] = {
16 'write': {'account': ['write']},
17 'read': {'account': ['write', 'read']}
18 }
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/accounts/__init__.py b/kinto/plugins/accounts/__init__.py
--- a/kinto/plugins/accounts/__init__.py
+++ b/kinto/plugins/accounts/__init__.py
@@ -1,4 +1,5 @@
from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
+from pyramid.exceptions import ConfigurationError
def includeme(config):
@@ -16,3 +17,12 @@
'write': {'account': ['write']},
'read': {'account': ['write', 'read']}
}
+
+ # Add some safety to avoid weird behaviour with basicauth default policy.
+ settings = config.get_settings()
+ auth_policies = settings['multiauth.policies']
+ if 'basicauth' in auth_policies and 'account' in auth_policies:
+ if auth_policies.index('basicauth') < auth_policies.index('account'):
+ error_msg = ("'basicauth' should not be mentioned before 'account' "
+ "in 'multiauth.policies' setting.")
+ raise ConfigurationError(error_msg)
|
{"golden_diff": "diff --git a/kinto/plugins/accounts/__init__.py b/kinto/plugins/accounts/__init__.py\n--- a/kinto/plugins/accounts/__init__.py\n+++ b/kinto/plugins/accounts/__init__.py\n@@ -1,4 +1,5 @@\n from kinto.authorization import PERMISSIONS_INHERITANCE_TREE\n+from pyramid.exceptions import ConfigurationError\n \n \n def includeme(config):\n@@ -16,3 +17,12 @@\n 'write': {'account': ['write']},\n 'read': {'account': ['write', 'read']}\n }\n+\n+ # Add some safety to avoid weird behaviour with basicauth default policy.\n+ settings = config.get_settings()\n+ auth_policies = settings['multiauth.policies']\n+ if 'basicauth' in auth_policies and 'account' in auth_policies:\n+ if auth_policies.index('basicauth') < auth_policies.index('account'):\n+ error_msg = (\"'basicauth' should not be mentioned before 'account' \"\n+ \"in 'multiauth.policies' setting.\")\n+ raise ConfigurationError(error_msg)\n", "issue": "Accounts post API crashes with unset id (if basicauth is enabled too)\n```\r\ngsurita-30820:~ gsurita$ echo '{\"data\": {\"password\": \"me\"}}' | http post localhost:8888/v1/accounts -a foo:bar\r\nHTTP/1.1 500 Internal Server Error\r\n(...)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/tweens.py\", line 22, in excview_tween\r\n response = handler(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py\", line 119, in tm_tween\r\n reraise(*exc_info)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/compat.py\", line 15, in reraise\r\n raise value\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py\", line 98, in tm_tween\r\n response = handler(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/router.py\", line 155, in handle_request\r\n view_name\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/view.py\", line 612, in _call_view\r\n response = view_callable(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 389, in attr_view\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 367, in predicate_wrapper\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 300, in secured_view\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 438, in rendered_view\r\n result = view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py\", line 81, in collection_post\r\n result = super(Account, self).collection_post()\r\n File \"/Users/gsurita/kinto/kinto/kinto/core/resource/__init__.py\", line 341, in collection_post\r\n new_record = self.process_record(new_record)\r\n File 
\"/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py\", line 102, in process_record\r\n if new[self.model.id_field] != self.request.selected_userid:\r\nKeyError: 'id'\r\n```\nAccounts post API crashes with unset id (if basicauth is enabled too)\n```\r\ngsurita-30820:~ gsurita$ echo '{\"data\": {\"password\": \"me\"}}' | http post localhost:8888/v1/accounts -a foo:bar\r\nHTTP/1.1 500 Internal Server Error\r\n(...)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/tweens.py\", line 22, in excview_tween\r\n response = handler(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py\", line 119, in tm_tween\r\n reraise(*exc_info)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/compat.py\", line 15, in reraise\r\n raise value\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid_tm/__init__.py\", line 98, in tm_tween\r\n response = handler(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/router.py\", line 155, in handle_request\r\n view_name\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/view.py\", line 612, in _call_view\r\n response = view_callable(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 389, in attr_view\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 367, in predicate_wrapper\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 300, in secured_view\r\n return view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 438, in rendered_view\r\n result = view(context, request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/Users/gsurita/kinto/kinto/.venv/lib/python3.6/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py\", line 81, in collection_post\r\n result = super(Account, self).collection_post()\r\n File \"/Users/gsurita/kinto/kinto/kinto/core/resource/__init__.py\", line 341, in collection_post\r\n new_record = self.process_record(new_record)\r\n File \"/Users/gsurita/kinto/kinto/kinto/plugins/accounts/views.py\", line 102, in process_record\r\n if new[self.model.id_field] != self.request.selected_userid:\r\nKeyError: 'id'\r\n```\n", "before_files": [{"content": "from kinto.authorization import PERMISSIONS_INHERITANCE_TREE\n\n\ndef includeme(config):\n config.add_api_capability(\n 'accounts',\n description='Manage user accounts.',\n url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')\n\n config.scan('kinto.plugins.accounts.views')\n\n PERMISSIONS_INHERITANCE_TREE[''].update({\n 'account:create': {}\n })\n PERMISSIONS_INHERITANCE_TREE['account'] = {\n 'write': {'account': ['write']},\n 'read': {'account': ['write', 'read']}\n }\n", "path": "kinto/plugins/accounts/__init__.py"}], "after_files": [{"content": "from 
kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom pyramid.exceptions import ConfigurationError\n\n\ndef includeme(config):\n config.add_api_capability(\n 'accounts',\n description='Manage user accounts.',\n url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')\n\n config.scan('kinto.plugins.accounts.views')\n\n PERMISSIONS_INHERITANCE_TREE[''].update({\n 'account:create': {}\n })\n PERMISSIONS_INHERITANCE_TREE['account'] = {\n 'write': {'account': ['write']},\n 'read': {'account': ['write', 'read']}\n }\n\n # Add some safety to avoid weird behaviour with basicauth default policy.\n settings = config.get_settings()\n auth_policies = settings['multiauth.policies']\n if 'basicauth' in auth_policies and 'account' in auth_policies:\n if auth_policies.index('basicauth') < auth_policies.index('account'):\n error_msg = (\"'basicauth' should not be mentioned before 'account' \"\n \"in 'multiauth.policies' setting.\")\n raise ConfigurationError(error_msg)\n", "path": "kinto/plugins/accounts/__init__.py"}]}
| 2,010 | 241 |
gh_patches_debug_26382 | rasdani/github-patches | git_diff | beetbox__beets-1675 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EmbedArt Plugin: remove_art_file doesn't seem to work
I'm running beets version 1.15. The EmbedArt plugin isn't removing the art file from the file system.
Logfile: http://pastebin.com/n10bbdpS
Config: http://pastebin.com/ztrjd16C
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/embedart.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2015, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Allows beets to embed album art into file metadata."""
16 from __future__ import (division, absolute_import, print_function,
17 unicode_literals)
18
19 import os.path
20
21 from beets.plugins import BeetsPlugin
22 from beets import ui
23 from beets.ui import decargs
24 from beets.util import syspath, normpath, displayable_path, bytestring_path
25 from beets.util.artresizer import ArtResizer
26 from beets import config
27 from beets import art
28
29
30 class EmbedCoverArtPlugin(BeetsPlugin):
31 """Allows albumart to be embedded into the actual files.
32 """
33 def __init__(self):
34 super(EmbedCoverArtPlugin, self).__init__()
35 self.config.add({
36 'maxwidth': 0,
37 'auto': True,
38 'compare_threshold': 0,
39 'ifempty': False,
40 'remove_art_file': False
41 })
42
43 if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
44 self.config['maxwidth'] = 0
45 self._log.warning(u"ImageMagick or PIL not found; "
46 u"'maxwidth' option ignored")
47 if self.config['compare_threshold'].get(int) and not \
48 ArtResizer.shared.can_compare:
49 self.config['compare_threshold'] = 0
50 self._log.warning(u"ImageMagick 6.8.7 or higher not installed; "
51 u"'compare_threshold' option ignored")
52
53 self.register_listener('art_set', self.process_album)
54
55 def commands(self):
56 # Embed command.
57 embed_cmd = ui.Subcommand(
58 'embedart', help='embed image files into file metadata'
59 )
60 embed_cmd.parser.add_option(
61 '-f', '--file', metavar='PATH', help='the image file to embed'
62 )
63 maxwidth = self.config['maxwidth'].get(int)
64 compare_threshold = self.config['compare_threshold'].get(int)
65 ifempty = self.config['ifempty'].get(bool)
66 remove_art_file = self.config['remove_art_file'].get(bool)
67
68 def embed_func(lib, opts, args):
69 if opts.file:
70 imagepath = normpath(opts.file)
71 if not os.path.isfile(syspath(imagepath)):
72 raise ui.UserError(u'image file {0} not found'.format(
73 displayable_path(imagepath)
74 ))
75 for item in lib.items(decargs(args)):
76 art.embed_item(self._log, item, imagepath, maxwidth, None,
77 compare_threshold, ifempty)
78 else:
79 for album in lib.albums(decargs(args)):
80 art.embed_album(self._log, album, maxwidth, False,
81 compare_threshold, ifempty)
82
83 if remove_art_file and album.artpath is not None:
84 if os.path.isfile(album.artpath):
85 self._log.debug(u'Removing album art file '
86 u'for {0}', album)
87 os.remove(album.artpath)
88 album.artpath = None
89 album.store()
90
91 embed_cmd.func = embed_func
92
93 # Extract command.
94 extract_cmd = ui.Subcommand('extractart',
95 help='extract an image from file metadata')
96 extract_cmd.parser.add_option('-o', dest='outpath',
97 help='image output file')
98 extract_cmd.parser.add_option('-n', dest='filename',
99 help='image filename to create for all '
100 'matched albums')
101 extract_cmd.parser.add_option('-a', dest='associate',
102 action='store_true',
103 help='associate the extracted images '
104 'with the album')
105
106 def extract_func(lib, opts, args):
107 if opts.outpath:
108 art.extract_first(self._log, normpath(opts.outpath),
109 lib.items(decargs(args)))
110 else:
111 filename = bytestring_path(opts.filename or
112 config['art_filename'].get())
113 if os.path.dirname(filename) != '':
114 self._log.error(u"Only specify a name rather than a path "
115 u"for -n")
116 return
117 for album in lib.albums(decargs(args)):
118 artpath = normpath(os.path.join(album.path, filename))
119 artpath = art.extract_first(self._log, artpath,
120 album.items())
121 if artpath and opts.associate:
122 album.set_art(artpath)
123 album.store()
124 extract_cmd.func = extract_func
125
126 # Clear command.
127 clear_cmd = ui.Subcommand('clearart',
128 help='remove images from file metadata')
129
130 def clear_func(lib, opts, args):
131 art.clear(self._log, lib, decargs(args))
132 clear_cmd.func = clear_func
133
134 return [embed_cmd, extract_cmd, clear_cmd]
135
136 def process_album(self, album):
137 """Automatically embed art after art has been set
138 """
139 if self.config['auto'] and config['import']['write']:
140 max_width = self.config['maxwidth'].get(int)
141 art.embed_album(self._log, album, max_width, True,
142 self.config['compare_threshold'].get(int),
143 self.config['ifempty'].get(bool))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py
--- a/beetsplug/embedart.py
+++ b/beetsplug/embedart.py
@@ -79,14 +79,7 @@
for album in lib.albums(decargs(args)):
art.embed_album(self._log, album, maxwidth, False,
compare_threshold, ifempty)
-
- if remove_art_file and album.artpath is not None:
- if os.path.isfile(album.artpath):
- self._log.debug(u'Removing album art file '
- u'for {0}', album)
- os.remove(album.artpath)
- album.artpath = None
- album.store()
+ self.remove_artfile(album)
embed_cmd.func = embed_func
@@ -141,3 +134,12 @@
art.embed_album(self._log, album, max_width, True,
self.config['compare_threshold'].get(int),
self.config['ifempty'].get(bool))
+ self.remove_artfile(album)
+ def remove_artfile(self, album)
+ if self.config['remove_art_file'] and album.artpath:
+ if os.path.isfile(album.artpath):
+ self._log.debug(u'Removing album art file '
+ u'for {0}', album)
+ os.remove(album.artpath)
+ album.artpath = None
+ album.store()
|
{"golden_diff": "diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py\n--- a/beetsplug/embedart.py\n+++ b/beetsplug/embedart.py\n@@ -79,14 +79,7 @@\n for album in lib.albums(decargs(args)):\n art.embed_album(self._log, album, maxwidth, False,\n compare_threshold, ifempty)\n-\n- if remove_art_file and album.artpath is not None:\n- if os.path.isfile(album.artpath):\n- self._log.debug(u'Removing album art file '\n- u'for {0}', album)\n- os.remove(album.artpath)\n- album.artpath = None\n- album.store()\n+ self.remove_artfile(album)\n \n embed_cmd.func = embed_func\n \n@@ -141,3 +134,12 @@\n art.embed_album(self._log, album, max_width, True,\n self.config['compare_threshold'].get(int),\n self.config['ifempty'].get(bool))\n+ self.remove_artfile(album)\n+ def remove_artfile(self, album)\n+ if self.config['remove_art_file'] and album.artpath:\n+ if os.path.isfile(album.artpath):\n+ self._log.debug(u'Removing album art file '\n+ u'for {0}', album)\n+ os.remove(album.artpath)\n+ album.artpath = None\n+ album.store()\n", "issue": "EmbedArt Plugin: remove_art_file doesn't seem to work\nI'm running beets version 1.15. The EmbedArt plugin isn't removing the art file from the file system. \nLogfile: http://pastebin.com/n10bbdpS\nConfig: http://pastebin.com/ztrjd16C\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets.ui import decargs\nfrom beets.util import syspath, normpath, displayable_path, bytestring_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\nfrom beets import art\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n 'remove_art_file': False\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n self.register_listener('art_set', self.process_album)\n\n def commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = 
self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n remove_art_file = self.config['remove_art_file'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n art.embed_item(self._log, item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n art.embed_album(self._log, album, maxwidth, False,\n compare_threshold, ifempty)\n\n if remove_art_file and album.artpath is not None:\n if os.path.isfile(album.artpath):\n self._log.debug(u'Removing album art file '\n u'for {0}', album)\n os.remove(album.artpath)\n album.artpath = None\n album.store()\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n extract_cmd.parser.add_option('-n', dest='filename',\n help='image filename to create for all '\n 'matched albums')\n extract_cmd.parser.add_option('-a', dest='associate',\n action='store_true',\n help='associate the extracted images '\n 'with the album')\n\n def extract_func(lib, opts, args):\n if opts.outpath:\n art.extract_first(self._log, normpath(opts.outpath),\n lib.items(decargs(args)))\n else:\n filename = bytestring_path(opts.filename or\n config['art_filename'].get())\n if os.path.dirname(filename) != '':\n self._log.error(u\"Only specify a name rather than a path \"\n u\"for -n\")\n return\n for album in lib.albums(decargs(args)):\n artpath = normpath(os.path.join(album.path, filename))\n artpath = art.extract_first(self._log, artpath,\n album.items())\n if artpath and opts.associate:\n album.set_art(artpath)\n album.store()\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n art.clear(self._log, lib, decargs(args))\n clear_cmd.func = clear_func\n\n return [embed_cmd, extract_cmd, clear_cmd]\n\n def process_album(self, album):\n \"\"\"Automatically embed art after art has been set\n \"\"\"\n if self.config['auto'] and config['import']['write']:\n max_width = self.config['maxwidth'].get(int)\n art.embed_album(self._log, album, max_width, True,\n self.config['compare_threshold'].get(int),\n self.config['ifempty'].get(bool))\n", "path": "beetsplug/embedart.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows beets to embed album art into file metadata.\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os.path\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets.ui import 
decargs\nfrom beets.util import syspath, normpath, displayable_path, bytestring_path\nfrom beets.util.artresizer import ArtResizer\nfrom beets import config\nfrom beets import art\n\n\nclass EmbedCoverArtPlugin(BeetsPlugin):\n \"\"\"Allows albumart to be embedded into the actual files.\n \"\"\"\n def __init__(self):\n super(EmbedCoverArtPlugin, self).__init__()\n self.config.add({\n 'maxwidth': 0,\n 'auto': True,\n 'compare_threshold': 0,\n 'ifempty': False,\n 'remove_art_file': False\n })\n\n if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:\n self.config['maxwidth'] = 0\n self._log.warning(u\"ImageMagick or PIL not found; \"\n u\"'maxwidth' option ignored\")\n if self.config['compare_threshold'].get(int) and not \\\n ArtResizer.shared.can_compare:\n self.config['compare_threshold'] = 0\n self._log.warning(u\"ImageMagick 6.8.7 or higher not installed; \"\n u\"'compare_threshold' option ignored\")\n\n self.register_listener('art_set', self.process_album)\n\n def commands(self):\n # Embed command.\n embed_cmd = ui.Subcommand(\n 'embedart', help='embed image files into file metadata'\n )\n embed_cmd.parser.add_option(\n '-f', '--file', metavar='PATH', help='the image file to embed'\n )\n maxwidth = self.config['maxwidth'].get(int)\n compare_threshold = self.config['compare_threshold'].get(int)\n ifempty = self.config['ifempty'].get(bool)\n remove_art_file = self.config['remove_art_file'].get(bool)\n\n def embed_func(lib, opts, args):\n if opts.file:\n imagepath = normpath(opts.file)\n if not os.path.isfile(syspath(imagepath)):\n raise ui.UserError(u'image file {0} not found'.format(\n displayable_path(imagepath)\n ))\n for item in lib.items(decargs(args)):\n art.embed_item(self._log, item, imagepath, maxwidth, None,\n compare_threshold, ifempty)\n else:\n for album in lib.albums(decargs(args)):\n art.embed_album(self._log, album, maxwidth, False,\n compare_threshold, ifempty)\n self.remove_artfile(album)\n\n embed_cmd.func = embed_func\n\n # Extract command.\n extract_cmd = ui.Subcommand('extractart',\n help='extract an image from file metadata')\n extract_cmd.parser.add_option('-o', dest='outpath',\n help='image output file')\n extract_cmd.parser.add_option('-n', dest='filename',\n help='image filename to create for all '\n 'matched albums')\n extract_cmd.parser.add_option('-a', dest='associate',\n action='store_true',\n help='associate the extracted images '\n 'with the album')\n\n def extract_func(lib, opts, args):\n if opts.outpath:\n art.extract_first(self._log, normpath(opts.outpath),\n lib.items(decargs(args)))\n else:\n filename = bytestring_path(opts.filename or\n config['art_filename'].get())\n if os.path.dirname(filename) != '':\n self._log.error(u\"Only specify a name rather than a path \"\n u\"for -n\")\n return\n for album in lib.albums(decargs(args)):\n artpath = normpath(os.path.join(album.path, filename))\n artpath = art.extract_first(self._log, artpath,\n album.items())\n if artpath and opts.associate:\n album.set_art(artpath)\n album.store()\n extract_cmd.func = extract_func\n\n # Clear command.\n clear_cmd = ui.Subcommand('clearart',\n help='remove images from file metadata')\n\n def clear_func(lib, opts, args):\n art.clear(self._log, lib, decargs(args))\n clear_cmd.func = clear_func\n\n return [embed_cmd, extract_cmd, clear_cmd]\n\n def process_album(self, album):\n \"\"\"Automatically embed art after art has been set\n \"\"\"\n if self.config['auto'] and config['import']['write']:\n max_width = self.config['maxwidth'].get(int)\n 
art.embed_album(self._log, album, max_width, True,\n self.config['compare_threshold'].get(int),\n self.config['ifempty'].get(bool))\n self.remove_artfile(album)\n def remove_artfile(self, album)\n if self.config['remove_art_file'] and album.artpath:\n if os.path.isfile(album.artpath):\n self._log.debug(u'Removing album art file '\n u'for {0}', album)\n os.remove(album.artpath)\n album.artpath = None\n album.store()\n", "path": "beetsplug/embedart.py"}]}
| 1,878 | 305 |
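The escaped payload above closes the previous row (beets' `embedart` plugin), whose patch factors the album-art-file cleanup into a `remove_artfile` helper. A minimal standalone sketch of that helper follows; it assumes an `album` object that behaves like a beets `Album` (an `artpath` attribute and a `store()` method), which is not defined in this row.

```python
import logging
import os

log = logging.getLogger(__name__)


def remove_artfile(album, remove_art_file=True):
    """Delete the on-disk cover file once its art has been embedded.

    Sketch of the helper extracted in the patch above; `album` is assumed
    to expose `artpath` and `store()` the way a beets Album does.
    """
    if remove_art_file and album.artpath is not None:
        if os.path.isfile(album.artpath):
            log.debug('Removing album art file for %s', album)
            os.remove(album.artpath)
            album.artpath = None
            album.store()
```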
gh_patches_debug_13392 | rasdani/github-patches | git_diff | pulp__pulpcore-4311 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import-check may return an error saying "directory does not exist" when it actually just lacks permissions
Endpoint /pulp/api/v3/importers/core/pulp/import-check/ returns an error saying "Directory does not exist" when the pulp user lacks permission to read said directory.
**To Reproduce**
Try importing content from a directory where the pulp user doesn't have read access.
**Expected behavior**
The error returned should indicate the permission error.
**Additional context**
Pulp uses the os.path.exists() method to verify that the directory exists: https://github.com/pulp/pulpcore/blob/main/pulpcore/app/views/importer.py#L44-L45
However, the method can return False if permission is not granted to access the directory, even if the directory exists:
~~~
os.path.exists(path)
Return True if path refers to an existing path or an open file descriptor. Returns False for broken symbolic links. On some platforms, this function may return False if permission is not granted to execute os.stat() on the requested file, even if the path physically exists.
~~~
os.path method documentation -> https://docs.python.org/3/library/os.path.html
--- END ISSUE ---
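To make the failure mode above concrete, here is a small self-contained demonstration (run as an unprivileged user on Linux; the temporary paths are placeholders created on the fly):

```python
import os
import stat
import tempfile

# Simulate the report: a directory that exists but cannot be reached.
outer = tempfile.mkdtemp()
inner = os.path.join(outer, "imports")
os.mkdir(inner)
os.chmod(outer, 0)             # drop all permissions on the parent

print(os.path.exists(inner))   # False, even though the directory exists
try:
    os.stat(inner)             # same probe, but the real reason surfaces
except OSError as exc:
    print(exc)                 # PermissionError: [Errno 13] Permission denied: ...

os.chmod(outer, stat.S_IRWXU)  # restore permissions so cleanup can succeed
```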
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/views/importer.py`
Content:
```
1 from gettext import gettext as _
2 import json
3 import os
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.views import APIView
6 from rest_framework.response import Response
7
8 from pulpcore.app import settings
9 from pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer
10
11
12 def _check_allowed_import_path(a_path):
13 user_provided_realpath = os.path.realpath(a_path)
14 for allowed_path in settings.ALLOWED_IMPORT_PATHS:
15 if user_provided_realpath.startswith(allowed_path):
16 return True, None
17 return False, _(
18 "{} is not an allowed import path".format(os.path.dirname(os.path.realpath(a_path)))
19 )
20
21
22 def _validate_file(in_param, data):
23 """
24 Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]
25
26 We check for a number of things, attempting to return all the errors we can find. We don't want
27 to give out information for files in arbitrary locations on the filesystem; if the check
28 for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.
29
30 If the directory containing the base-file doesn't exist, or isn't readable, or the specified
31 file doesn't exist, report and return.
32
33 Error-messages for all other checks are additive.
34 """
35 # check allowed, leave if failed
36 file = data[in_param]
37 real_file = os.path.realpath(file)
38 rc, msg = _check_allowed_import_path(real_file)
39 if not rc:
40 return rc, [msg]
41
42 # check directory-sanity, leave if failed
43 owning_dir = os.path.dirname(real_file)
44 if not os.path.exists(owning_dir):
45 return False, [_("directory {} does not exist").format(owning_dir)]
46 if not os.access(owning_dir, os.R_OK):
47 return False, [_("directory {} does not allow read-access").format(owning_dir)]
48
49 # check file-exists, leave if failed
50 if not os.path.exists(real_file):
51 return False, [_("file {} does not exist").format(real_file)]
52
53 # check file-sanity
54 msgs = []
55 isfile = os.path.isfile(real_file)
56 readable = os.access(real_file, os.R_OK)
57
58 rc = isfile and readable
59 if not isfile:
60 msgs.append(_("{} is not a file".format(real_file)))
61 if not readable:
62 msgs.append(_("{} exists but cannot be read".format(real_file)))
63
64 # extra check for toc-dir-write
65 if in_param == "toc":
66 if not os.access(owning_dir, os.W_OK):
67 rc = False
68 msgs.append(_("directory {} must allow pulp write-access".format(owning_dir)))
69
70 return rc, msgs
71
72
73 class PulpImporterImportCheckView(APIView):
74 """
75 Returns validity of proposed parameters for a PulpImport call.
76 """
77
78 @extend_schema(
79 summary="Validate the parameters to be used for a PulpImport call",
80 operation_id="pulp_import_check_post",
81 request=PulpImportCheckSerializer,
82 responses={200: PulpImportCheckResponseSerializer},
83 )
84 def post(self, request, format=None):
85 """
86 Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.
87
88 * Checks that toc, path are in ALLOWED_IMPORT_PATHS
89 * if ALLOWED:
90 * Checks that toc, path exist and are readable
91 * If toc specified, checks that containing dir is writeable
92 * Checks that repo_mapping is valid JSON
93 """
94 serializer = PulpImportCheckSerializer(data=request.data)
95 if serializer.is_valid():
96 data = {}
97 if "toc" in serializer.data:
98 data["toc"] = {}
99 data["toc"]["context"] = serializer.data["toc"]
100 data["toc"]["is_valid"], data["toc"]["messages"] = _validate_file(
101 "toc", serializer.data
102 )
103
104 if "path" in serializer.data:
105 data["path"] = {}
106 data["path"]["context"] = serializer.data["path"]
107 data["path"]["is_valid"], data["path"]["messages"] = _validate_file(
108 "path", serializer.data
109 )
110
111 if "repo_mapping" in serializer.data:
112 data["repo_mapping"] = {}
113 data["repo_mapping"]["context"] = serializer.data["repo_mapping"]
114 try:
115 json.loads(serializer.data["repo_mapping"])
116 data["repo_mapping"]["is_valid"] = True
117 data["repo_mapping"]["messages"] = []
118 except json.JSONDecodeError:
119 data["repo_mapping"]["is_valid"] = False
120 data["repo_mapping"]["messages"] = [_("invalid JSON")]
121
122 crs = PulpImportCheckResponseSerializer(data, context={"request": request})
123 return Response(crs.data)
124 return Response(serializer.errors, status=400)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/views/importer.py b/pulpcore/app/views/importer.py
--- a/pulpcore/app/views/importer.py
+++ b/pulpcore/app/views/importer.py
@@ -40,11 +40,14 @@
return rc, [msg]
# check directory-sanity, leave if failed
+ # use os.stat to ensure directory exists and pulp has read-access
+ # return any errors received from os.stat to the user
+
owning_dir = os.path.dirname(real_file)
- if not os.path.exists(owning_dir):
- return False, [_("directory {} does not exist").format(owning_dir)]
- if not os.access(owning_dir, os.R_OK):
- return False, [_("directory {} does not allow read-access").format(owning_dir)]
+ try:
+ os.stat(owning_dir)
+ except OSError as e:
+ return False, [_("{}").format(e)]
# check file-exists, leave if failed
if not os.path.exists(real_file):
|
{"golden_diff": "diff --git a/pulpcore/app/views/importer.py b/pulpcore/app/views/importer.py\n--- a/pulpcore/app/views/importer.py\n+++ b/pulpcore/app/views/importer.py\n@@ -40,11 +40,14 @@\n return rc, [msg]\n \n # check directory-sanity, leave if failed\n+ # use os.stat to ensure directory exists and pulp has read-access\n+ # return any errors received from os.stat to the user\n+\n owning_dir = os.path.dirname(real_file)\n- if not os.path.exists(owning_dir):\n- return False, [_(\"directory {} does not exist\").format(owning_dir)]\n- if not os.access(owning_dir, os.R_OK):\n- return False, [_(\"directory {} does not allow read-access\").format(owning_dir)]\n+ try:\n+ os.stat(owning_dir)\n+ except OSError as e:\n+ return False, [_(\"{}\").format(e)]\n \n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n", "issue": "Import-check may return error saying \"director does not exist\" when it actually just lack permissions\nEndpoint /pulp/api/v3/importers/core/pulp/import-check/ returns error saying \"Directory does not exist\" when pulp user lack permissions to read said directory.\r\n\r\n**To Reproduce**\r\n\r\nTry importing content from a directory where pulp user doesn't have read access.\r\n\r\n**Expected behavior**\r\nError returned should indicate the permission error.\r\n\r\n**Additional context**\r\n\r\nPulp is using os.path.exists() method to verify if the directory exists: https://github.com/pulp/pulpcore/blob/main/pulpcore/app/views/importer.py#L44-L45\r\n\r\nHowever, the method can return false if permission is not granted to access the directory even if the directory exists\r\n\r\n~~~\r\nos.path.exists(path)\r\nReturn True if path refers to an existing path or an open file descriptor. Returns False for broken symbolic links. On some platforms, this function may return False if permission is not granted to execute os.stat() on the requested file, even if the path physically exists.\r\n~~~\r\n\r\nos.path method documentation -> https://docs.python.org/3/library/os.path.html\r\n\n", "before_files": [{"content": "from gettext import gettext as _\nimport json\nimport os\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom pulpcore.app import settings\nfrom pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer\n\n\ndef _check_allowed_import_path(a_path):\n user_provided_realpath = os.path.realpath(a_path)\n for allowed_path in settings.ALLOWED_IMPORT_PATHS:\n if user_provided_realpath.startswith(allowed_path):\n return True, None\n return False, _(\n \"{} is not an allowed import path\".format(os.path.dirname(os.path.realpath(a_path)))\n )\n\n\ndef _validate_file(in_param, data):\n \"\"\"\n Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]\n\n We check for a number of things, attempting to return all the errors we can find. 
We don't want\n to give out information for files in arbitrary locations on the filesystem; if the check\n for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.\n\n If the directory containing the base-file doesn't exist, or isn't readable, or the specified\n file doesn't exist, report and return.\n\n Error-messages for all other checks are additive.\n \"\"\"\n # check allowed, leave if failed\n file = data[in_param]\n real_file = os.path.realpath(file)\n rc, msg = _check_allowed_import_path(real_file)\n if not rc:\n return rc, [msg]\n\n # check directory-sanity, leave if failed\n owning_dir = os.path.dirname(real_file)\n if not os.path.exists(owning_dir):\n return False, [_(\"directory {} does not exist\").format(owning_dir)]\n if not os.access(owning_dir, os.R_OK):\n return False, [_(\"directory {} does not allow read-access\").format(owning_dir)]\n\n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n return False, [_(\"file {} does not exist\").format(real_file)]\n\n # check file-sanity\n msgs = []\n isfile = os.path.isfile(real_file)\n readable = os.access(real_file, os.R_OK)\n\n rc = isfile and readable\n if not isfile:\n msgs.append(_(\"{} is not a file\".format(real_file)))\n if not readable:\n msgs.append(_(\"{} exists but cannot be read\".format(real_file)))\n\n # extra check for toc-dir-write\n if in_param == \"toc\":\n if not os.access(owning_dir, os.W_OK):\n rc = False\n msgs.append(_(\"directory {} must allow pulp write-access\".format(owning_dir)))\n\n return rc, msgs\n\n\nclass PulpImporterImportCheckView(APIView):\n \"\"\"\n Returns validity of proposed parameters for a PulpImport call.\n \"\"\"\n\n @extend_schema(\n summary=\"Validate the parameters to be used for a PulpImport call\",\n operation_id=\"pulp_import_check_post\",\n request=PulpImportCheckSerializer,\n responses={200: PulpImportCheckResponseSerializer},\n )\n def post(self, request, format=None):\n \"\"\"\n Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.\n\n * Checks that toc, path are in ALLOWED_IMPORT_PATHS\n * if ALLOWED:\n * Checks that toc, path exist and are readable\n * If toc specified, checks that containing dir is writeable\n * Checks that repo_mapping is valid JSON\n \"\"\"\n serializer = PulpImportCheckSerializer(data=request.data)\n if serializer.is_valid():\n data = {}\n if \"toc\" in serializer.data:\n data[\"toc\"] = {}\n data[\"toc\"][\"context\"] = serializer.data[\"toc\"]\n data[\"toc\"][\"is_valid\"], data[\"toc\"][\"messages\"] = _validate_file(\n \"toc\", serializer.data\n )\n\n if \"path\" in serializer.data:\n data[\"path\"] = {}\n data[\"path\"][\"context\"] = serializer.data[\"path\"]\n data[\"path\"][\"is_valid\"], data[\"path\"][\"messages\"] = _validate_file(\n \"path\", serializer.data\n )\n\n if \"repo_mapping\" in serializer.data:\n data[\"repo_mapping\"] = {}\n data[\"repo_mapping\"][\"context\"] = serializer.data[\"repo_mapping\"]\n try:\n json.loads(serializer.data[\"repo_mapping\"])\n data[\"repo_mapping\"][\"is_valid\"] = True\n data[\"repo_mapping\"][\"messages\"] = []\n except json.JSONDecodeError:\n data[\"repo_mapping\"][\"is_valid\"] = False\n data[\"repo_mapping\"][\"messages\"] = [_(\"invalid JSON\")]\n\n crs = PulpImportCheckResponseSerializer(data, context={\"request\": request})\n return Response(crs.data)\n return Response(serializer.errors, status=400)\n", "path": "pulpcore/app/views/importer.py"}], "after_files": [{"content": "from gettext import gettext as _\nimport 
json\nimport os\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom pulpcore.app import settings\nfrom pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer\n\n\ndef _check_allowed_import_path(a_path):\n user_provided_realpath = os.path.realpath(a_path)\n for allowed_path in settings.ALLOWED_IMPORT_PATHS:\n if user_provided_realpath.startswith(allowed_path):\n return True, None\n return False, _(\n \"{} is not an allowed import path\".format(os.path.dirname(os.path.realpath(a_path)))\n )\n\n\ndef _validate_file(in_param, data):\n \"\"\"\n Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]\n\n We check for a number of things, attempting to return all the errors we can find. We don't want\n to give out information for files in arbitrary locations on the filesystem; if the check\n for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.\n\n If the directory containing the base-file doesn't exist, or isn't readable, or the specified\n file doesn't exist, report and return.\n\n Error-messages for all other checks are additive.\n \"\"\"\n # check allowed, leave if failed\n file = data[in_param]\n real_file = os.path.realpath(file)\n rc, msg = _check_allowed_import_path(real_file)\n if not rc:\n return rc, [msg]\n\n # check directory-sanity, leave if failed\n # use os.stat to ensure directory exists and pulp has read-access\n # return any errors received from os.stat to the user\n\n owning_dir = os.path.dirname(real_file)\n try:\n os.stat(owning_dir)\n except OSError as e:\n return False, [_(\"{}\").format(e)]\n\n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n return False, [_(\"file {} does not exist\").format(real_file)]\n\n # check file-sanity\n msgs = []\n isfile = os.path.isfile(real_file)\n readable = os.access(real_file, os.R_OK)\n\n rc = isfile and readable\n if not isfile:\n msgs.append(_(\"{} is not a file\".format(real_file)))\n if not readable:\n msgs.append(_(\"{} exists but cannot be read\".format(real_file)))\n\n # extra check for toc-dir-write\n if in_param == \"toc\":\n if not os.access(owning_dir, os.W_OK):\n rc = False\n msgs.append(_(\"directory {} must allow pulp write-access\".format(owning_dir)))\n\n return rc, msgs\n\n\nclass PulpImporterImportCheckView(APIView):\n \"\"\"\n Returns validity of proposed parameters for a PulpImport call.\n \"\"\"\n\n @extend_schema(\n summary=\"Validate the parameters to be used for a PulpImport call\",\n operation_id=\"pulp_import_check_post\",\n request=PulpImportCheckSerializer,\n responses={200: PulpImportCheckResponseSerializer},\n )\n def post(self, request, format=None):\n \"\"\"\n Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.\n\n * Checks that toc, path are in ALLOWED_IMPORT_PATHS\n * if ALLOWED:\n * Checks that toc, path exist and are readable\n * If toc specified, checks that containing dir is writeable\n * Checks that repo_mapping is valid JSON\n \"\"\"\n serializer = PulpImportCheckSerializer(data=request.data)\n if serializer.is_valid():\n data = {}\n if \"toc\" in serializer.data:\n data[\"toc\"] = {}\n data[\"toc\"][\"context\"] = serializer.data[\"toc\"]\n data[\"toc\"][\"is_valid\"], data[\"toc\"][\"messages\"] = _validate_file(\n \"toc\", serializer.data\n )\n\n if \"path\" in serializer.data:\n data[\"path\"] = {}\n data[\"path\"][\"context\"] = 
serializer.data[\"path\"]\n data[\"path\"][\"is_valid\"], data[\"path\"][\"messages\"] = _validate_file(\n \"path\", serializer.data\n )\n\n if \"repo_mapping\" in serializer.data:\n data[\"repo_mapping\"] = {}\n data[\"repo_mapping\"][\"context\"] = serializer.data[\"repo_mapping\"]\n try:\n json.loads(serializer.data[\"repo_mapping\"])\n data[\"repo_mapping\"][\"is_valid\"] = True\n data[\"repo_mapping\"][\"messages\"] = []\n except json.JSONDecodeError:\n data[\"repo_mapping\"][\"is_valid\"] = False\n data[\"repo_mapping\"][\"messages\"] = [_(\"invalid JSON\")]\n\n crs = PulpImportCheckResponseSerializer(data, context={\"request\": request})\n return Response(crs.data)\n return Response(serializer.errors, status=400)\n", "path": "pulpcore/app/views/importer.py"}]}
| 1,824 | 237 |
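A hedged sketch of the check the golden diff above converges on, isolated from the serializer plumbing: probing the directory with `os.stat()` lets the raised `OSError` (missing path or permission denial) be passed back to the API caller verbatim. The function name here is illustrative, not part of pulpcore's API.

```python
import os


def check_owning_dir(owning_dir):
    """Return (ok, messages) for the directory that should hold an import file.

    os.path.exists() collapses "missing" and "forbidden" into False;
    os.stat() raises an OSError whose message names the actual cause.
    """
    try:
        os.stat(owning_dir)
    except OSError as exc:
        return False, [str(exc)]
    return True, []


if __name__ == "__main__":
    # Placeholder path; the result depends on local permissions.
    print(check_owning_dir("/var/lib/pulp/imports"))
```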
gh_patches_debug_14994 | rasdani/github-patches | git_diff | rootpy__rootpy-773 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception on import when not forwarding X11
Dear developers,
I believe I'm experiencing a bug when trying to use rootpy over SSH. Simply importing
```Python
from rootpy.plotting import Hist
```
results in an exception:
```Python
WARNING:ROOT.TUnixSystem.SetDisplay] DISPLAY not set, setting it to :pts/0:S.8
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/__init__.py", line 12, in <module>
from .legend import Legend
File "/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py", line 318, in _importhook
return _orig_ihook( name, *args, **kwds )
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/legend.py", line 8, in <module>
from .box import _Positionable
File "/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py", line 318, in _importhook
return _orig_ihook( name, *args, **kwds )
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/box.py", line 5, in <module>
from .utils import canvases_with
File "/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py", line 318, in _importhook
return _orig_ihook( name, *args, **kwds )
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/utils.py", line 7, in <module>
from .canvas import _PadBase
File "/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py", line 318, in _importhook
return _orig_ihook( name, *args, **kwds )
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/canvas.py", line 186, in <module>
class Pad(_PadBase, QROOT.TPad):
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/utils/module_facade.py", line 84, in __getattr__
result = sup.__getattr__(key)
File "/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/utils/quickroot.py", line 71, in __getattr__
libname, symbol))
RuntimeError: Unable to load libGui (required by TPad)
```
The problem does not occur if I connect with `ssh -Y`, but I would expect rootpy to be usable without a GUI as well.
I'm using rootpy 1.0.0 installed with pip, Python 3.5.3, and ROOT 6.10.04 on Scientific Linux 6.5.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rootpy/utils/quickroot.py`
Content:
```
1 """
2 Quickly load ROOT symbols without triggering PyROOT's finalSetup().
3 The main principle is that appropriate dictionaries first need to be loaded.
4 """
5 from __future__ import absolute_import
6
7 import ROOT
8
9 from .. import log; log = log[__name__]
10 from .module_facade import Facade
11
12 __all__ = []
13
14
15 root_module = ROOT.module._root
16 if hasattr(root_module, 'LookupCppEntity'): # pragma: no cover
17 lookup_func = 'LookupCppEntity'
18 else: # pragma: no cover
19 lookup_func = 'LookupRootEntity'
20
21 # Quick's __name__ needs to be the ROOT module for this to be transparent.
22 # The below is one way of obtaining such a function
23 # First determine the ROOT version without triggering PyROOT's finalSetup()
24 Quick = eval('lambda symbol: module._root.{0}(symbol)'.format(lookup_func),
25 ROOT.__dict__)
26
27 _gSystem = Quick("gSystem")
28 Load = _gSystem.Load
29
30 # It is not vital to list _all_ symbols in here, just enough that a library
31 # will be loaded by the time it is needed.
32 SYMBOLS = dict(
33 Hist='TH1 TGraph TGraphAsymmErrors',
34 Tree='TCut TTree',
35 Gui='TPad TCanvas',
36 Graf='TLegend TLine TEllipse',
37 Physics='TVector2 TVector3 TLorentzVector TRotation TLorentzRotation',
38 Matrix='TMatrixT',
39 RooStats='RooStats RooMsgService',
40 RooFit='RooFit RooWorkspace',
41 )
42
43 # Mapping of symbols to libraries which need to be loaded
44 SYMBOLS_TO_LIB = dict(
45 (sym, lib) for lib, syms in SYMBOLS.items() for sym in syms.split())
46
47 # If you encounter problems with particular symbols, add them to this set.
48 SLOW = set("".split())
49
50
51 @Facade(__name__, expose_internal=False)
52 class QuickROOT(object):
53 def __getattr__(self, symbol):
54 if symbol in SLOW: # pragma: no cover
55 log.warning(
56 "Tried to quickly load {0} which is always slow".format(symbol))
57
58 lib = SYMBOLS_TO_LIB.get(symbol, None)
59 if lib:
60 # Load() doesn't cost anything if the library is already loaded
61 libname = "lib{0}".format(lib)
62 if libname not in _gSystem.GetLibraries():
63 regex = "^duplicate entry .* for level 0; ignored$"
64 with log["/ROOT.TEnvRec.ChangeValue"].ignore(regex):
65 if Load(libname) == 0:
66 log.debug("Loaded {0} (required by {1})".format(
67 libname, symbol))
68 else: # pragma: no cover
69 raise RuntimeError(
70 "Unable to load {0} (required by {1})".format(
71 libname, symbol))
72
73 try:
74 thing = Quick(symbol)
75 except NameError: # pragma: no cover
76 # NameError: global name 'module' is not defined
77 # Python must be exiting...
78 return None
79 if isinstance(thing, root_module.PropertyProxy): # descriptor
80 setattr(self.__class__, symbol, thing)
81 return getattr(self, symbol)
82 # normal member
83 return thing
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rootpy/utils/quickroot.py b/rootpy/utils/quickroot.py
--- a/rootpy/utils/quickroot.py
+++ b/rootpy/utils/quickroot.py
@@ -65,6 +65,12 @@
if Load(libname) == 0:
log.debug("Loaded {0} (required by {1})".format(
libname, symbol))
+ elif lib == 'Gui':
+ # Possibly no X11 forwarding
+ log.debug("Unable to load {0} (required by {1}). "
+ "Putting ROOT in batch mode.".format(
+ libname, symbol))
+ ROOT.gROOT.SetBatch(True)
else: # pragma: no cover
raise RuntimeError(
"Unable to load {0} (required by {1})".format(
|
{"golden_diff": "diff --git a/rootpy/utils/quickroot.py b/rootpy/utils/quickroot.py\n--- a/rootpy/utils/quickroot.py\n+++ b/rootpy/utils/quickroot.py\n@@ -65,6 +65,12 @@\n if Load(libname) == 0:\n log.debug(\"Loaded {0} (required by {1})\".format(\n libname, symbol))\n+ elif lib == 'Gui':\n+ # Possibly no X11 forwarding\n+ log.debug(\"Unable to load {0} (required by {1}). \"\n+ \"Putting ROOT in batch mode.\".format(\n+ libname, symbol))\n+ ROOT.gROOT.SetBatch(True)\n else: # pragma: no cover\n raise RuntimeError(\n \"Unable to load {0} (required by {1})\".format(\n", "issue": "Exception on import when not forwarding X11\nDear developers,\r\n\r\nI believe I'm experiencing a bug when trying to use rootpy over SSH. Simply importing\r\n```Python\r\nfrom rootpy.plotting import Hist\r\n```\r\nresults in an exception:\r\n```Python\r\nWARNING:ROOT.TUnixSystem.SetDisplay] DISPLAY not set, setting it to :pts/0:S.8\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/__init__.py\", line 12, in <module>\r\n from .legend import Legend\r\n File \"/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py\", line 318, in _importhook\r\n return _orig_ihook( name, *args, **kwds )\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/legend.py\", line 8, in <module>\r\n from .box import _Positionable\r\n File \"/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py\", line 318, in _importhook\r\n return _orig_ihook( name, *args, **kwds )\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/box.py\", line 5, in <module>\r\n from .utils import canvases_with\r\n File \"/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py\", line 318, in _importhook\r\n return _orig_ihook( name, *args, **kwds )\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/utils.py\", line 7, in <module>\r\n from .canvas import _PadBase\r\n File \"/gridsoft/ipnls/root/v6.10.04/lib/ROOT.py\", line 318, in _importhook\r\n return _orig_ihook( name, *args, **kwds )\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/plotting/canvas.py\", line 186, in <module>\r\n class Pad(_PadBase, QROOT.TPad):\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/utils/module_facade.py\", line 84, in __getattr__\r\n result = sup.__getattr__(key)\r\n File \"/home/cms/popov/.local/lib/python3.5/site-packages/rootpy/utils/quickroot.py\", line 71, in __getattr__\r\n libname, symbol))\r\nRuntimeError: Unable to load libGui (required by TPad)\r\n```\r\nThe problem does not occur if I connect with `ssh -Y`, but I would expect rootpy be usable also without GUI.\r\n\r\nI'm using rootpy 1.0.0 installed with pip, Python 3.5.3, ROOT 6.10.04 with Scientific Linux 6.5.\n", "before_files": [{"content": "\"\"\"\nQuickly load ROOT symbols without triggering PyROOT's finalSetup().\nThe main principle is that appropriate dictionaries first need to be loaded.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport ROOT\n\nfrom .. 
import log; log = log[__name__]\nfrom .module_facade import Facade\n\n__all__ = []\n\n\nroot_module = ROOT.module._root\nif hasattr(root_module, 'LookupCppEntity'): # pragma: no cover\n lookup_func = 'LookupCppEntity'\nelse: # pragma: no cover\n lookup_func = 'LookupRootEntity'\n\n# Quick's __name__ needs to be the ROOT module for this to be transparent.\n# The below is one way of obtaining such a function\n# First determine the ROOT version without triggering PyROOT's finalSetup()\nQuick = eval('lambda symbol: module._root.{0}(symbol)'.format(lookup_func),\n ROOT.__dict__)\n\n_gSystem = Quick(\"gSystem\")\nLoad = _gSystem.Load\n\n# It is not vital to list _all_ symbols in here, just enough that a library\n# will be loaded by the time it is needed.\nSYMBOLS = dict(\n Hist='TH1 TGraph TGraphAsymmErrors',\n Tree='TCut TTree',\n Gui='TPad TCanvas',\n Graf='TLegend TLine TEllipse',\n Physics='TVector2 TVector3 TLorentzVector TRotation TLorentzRotation',\n Matrix='TMatrixT',\n RooStats='RooStats RooMsgService',\n RooFit='RooFit RooWorkspace',\n)\n\n# Mapping of symbols to libraries which need to be loaded\nSYMBOLS_TO_LIB = dict(\n (sym, lib) for lib, syms in SYMBOLS.items() for sym in syms.split())\n\n# If you encounter problems with particular symbols, add them to this set.\nSLOW = set(\"\".split())\n\n\n@Facade(__name__, expose_internal=False)\nclass QuickROOT(object):\n def __getattr__(self, symbol):\n if symbol in SLOW: # pragma: no cover\n log.warning(\n \"Tried to quickly load {0} which is always slow\".format(symbol))\n\n lib = SYMBOLS_TO_LIB.get(symbol, None)\n if lib:\n # Load() doesn't cost anything if the library is already loaded\n libname = \"lib{0}\".format(lib)\n if libname not in _gSystem.GetLibraries():\n regex = \"^duplicate entry .* for level 0; ignored$\"\n with log[\"/ROOT.TEnvRec.ChangeValue\"].ignore(regex):\n if Load(libname) == 0:\n log.debug(\"Loaded {0} (required by {1})\".format(\n libname, symbol))\n else: # pragma: no cover\n raise RuntimeError(\n \"Unable to load {0} (required by {1})\".format(\n libname, symbol))\n\n try:\n thing = Quick(symbol)\n except NameError: # pragma: no cover\n # NameError: global name 'module' is not defined\n # Python must be exiting...\n return None\n if isinstance(thing, root_module.PropertyProxy): # descriptor\n setattr(self.__class__, symbol, thing)\n return getattr(self, symbol)\n # normal member\n return thing\n", "path": "rootpy/utils/quickroot.py"}], "after_files": [{"content": "\"\"\"\nQuickly load ROOT symbols without triggering PyROOT's finalSetup().\nThe main principle is that appropriate dictionaries first need to be loaded.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport ROOT\n\nfrom .. 
import log; log = log[__name__]\nfrom .module_facade import Facade\n\n__all__ = []\n\n\nroot_module = ROOT.module._root\nif hasattr(root_module, 'LookupCppEntity'): # pragma: no cover\n lookup_func = 'LookupCppEntity'\nelse: # pragma: no cover\n lookup_func = 'LookupRootEntity'\n\n# Quick's __name__ needs to be the ROOT module for this to be transparent.\n# The below is one way of obtaining such a function\n# First determine the ROOT version without triggering PyROOT's finalSetup()\nQuick = eval('lambda symbol: module._root.{0}(symbol)'.format(lookup_func),\n ROOT.__dict__)\n\n_gSystem = Quick(\"gSystem\")\nLoad = _gSystem.Load\n\n# It is not vital to list _all_ symbols in here, just enough that a library\n# will be loaded by the time it is needed.\nSYMBOLS = dict(\n Hist='TH1 TGraph TGraphAsymmErrors',\n Tree='TCut TTree',\n Gui='TPad TCanvas',\n Graf='TLegend TLine TEllipse',\n Physics='TVector2 TVector3 TLorentzVector TRotation TLorentzRotation',\n Matrix='TMatrixT',\n RooStats='RooStats RooMsgService',\n RooFit='RooFit RooWorkspace',\n)\n\n# Mapping of symbols to libraries which need to be loaded\nSYMBOLS_TO_LIB = dict(\n (sym, lib) for lib, syms in SYMBOLS.items() for sym in syms.split())\n\n# If you encounter problems with particular symbols, add them to this set.\nSLOW = set(\"\".split())\n\n\n@Facade(__name__, expose_internal=False)\nclass QuickROOT(object):\n def __getattr__(self, symbol):\n if symbol in SLOW: # pragma: no cover\n log.warning(\n \"Tried to quickly load {0} which is always slow\".format(symbol))\n\n lib = SYMBOLS_TO_LIB.get(symbol, None)\n if lib:\n # Load() doesn't cost anything if the library is already loaded\n libname = \"lib{0}\".format(lib)\n if libname not in _gSystem.GetLibraries():\n regex = \"^duplicate entry .* for level 0; ignored$\"\n with log[\"/ROOT.TEnvRec.ChangeValue\"].ignore(regex):\n if Load(libname) == 0:\n log.debug(\"Loaded {0} (required by {1})\".format(\n libname, symbol))\n elif lib == 'Gui':\n # Possibly no X11 forwarding\n log.debug(\"Unable to load {0} (required by {1}). \"\n \"Putting ROOT in batch mode.\".format(\n libname, symbol))\n ROOT.gROOT.SetBatch(True)\n else: # pragma: no cover\n raise RuntimeError(\n \"Unable to load {0} (required by {1})\".format(\n libname, symbol))\n\n try:\n thing = Quick(symbol)\n except NameError: # pragma: no cover\n # NameError: global name 'module' is not defined\n # Python must be exiting...\n return None\n if isinstance(thing, root_module.PropertyProxy): # descriptor\n setattr(self.__class__, symbol, thing)\n return getattr(self, symbol)\n # normal member\n return thing\n", "path": "rootpy/utils/quickroot.py"}]}
| 1,801 | 179 |
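The golden diff above degrades gracefully when the GUI library cannot be loaded. Below is a minimal sketch of that fallback, lifted out of rootpy's lazy-loading machinery; it requires a ROOT installation that provides the PyROOT `ROOT` module, and the function name is illustrative.

```python
import logging

import ROOT  # PyROOT; only available where ROOT itself is installed

log = logging.getLogger(__name__)


def load_library(libname, symbol):
    """Load a ROOT library, tolerating a missing libGui.

    TSystem::Load() returns 0 on success; on failure for libGui
    (typically no X11 display) we switch ROOT to batch mode instead
    of raising, mirroring the patch above.
    """
    if ROOT.gSystem.Load(libname) == 0:
        log.debug("Loaded %s (required by %s)", libname, symbol)
    elif libname == "libGui":
        log.debug("Unable to load %s; putting ROOT in batch mode", libname)
        ROOT.gROOT.SetBatch(True)
    else:
        raise RuntimeError(
            "Unable to load {0} (required by {1})".format(libname, symbol))
```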
gh_patches_debug_37733 | rasdani/github-patches | git_diff | joke2k__faker-1520 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pydecimal unnecessarily limited by float's max digits
* Faker version: master at time of writing https://github.com/joke2k/faker/commit/d9f4b00b9134e6dfbb09cc1caa81c912b79c3c7c
* OS: Linux
Python's `Decimal` can carry arbitrarily many digits, with a default context precision of 28. Faker's `pydecimal` uses `pyfloat`, and so gets limited to `sys.float_info.dig`, which is appropriate for `pyfloat` but not really relevant for `pydecimal`. (The Decimal context could even be less than that.)
### Steps to reproduce
1. `pydecimal(left_digits=16)`
### Expected behavior
Get a 16 digit Decimal
### Actual behavior
> ValueError: Asking for too many digits (16 + 0 == 16 > 15)
--- END ISSUE ---
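As a quick, standard-library-only illustration of the premise above (float digit limits are irrelevant to `Decimal`):

```python
import sys
from decimal import Decimal, getcontext

print(sys.float_info.dig)           # typically 15: digits a float preserves
print(getcontext().prec)            # 28: Decimal's default context precision
print(Decimal("1234567890123456"))  # all 16 digits survive exactly
print(float("1234567890123456.7"))  # the float rounds to a nearby value
```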
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/python/__init__.py`
Content:
```
1 import math
2 import string
3 import sys
4 import warnings
5
6 from decimal import Decimal
7
8 from .. import BaseProvider
9
10
11 class Provider(BaseProvider):
12 default_value_types = (
13 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',
14 'date_time', 'uri', 'email',
15 )
16
17 def _check_signature(self, value_types, allowed_types):
18 if value_types is not None and not isinstance(value_types, (list, tuple)):
19 value_types = [value_types]
20 warnings.warn(
21 'Passing value types as positional arguments is going to be '
22 'deprecated. Pass them as a list or tuple instead.',
23 PendingDeprecationWarning,
24 )
25 if value_types is None:
26 value_types = ()
27 return tuple(value_types) + allowed_types
28
29 def pybool(self):
30 return self.random_int(0, 1) == 1
31
32 def pystr(self, min_chars=None, max_chars=20):
33 """
34 Generates a random string of upper and lowercase letters.
35 :type min_chars: int
36 :type max_chars: int
37 :return: String. Random of random length between min and max characters.
38 """
39 if min_chars is None:
40 return "".join(self.random_letters(length=max_chars))
41 else:
42 assert (
43 max_chars >= min_chars), "Maximum length must be greater than or equal to minimum length"
44 return "".join(
45 self.random_letters(
46 length=self.generator.random.randint(min_chars, max_chars),
47 ),
48 )
49
50 def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):
51 return self.bothify(self.generator.parse(string_format), letters=letters)
52
53 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
54 min_value=None, max_value=None):
55 if left_digits is not None and left_digits < 0:
56 raise ValueError(
57 'A float number cannot have less than 0 digits in its '
58 'integer part')
59 if right_digits is not None and right_digits < 0:
60 raise ValueError(
61 'A float number cannot have less than 0 digits in its '
62 'fractional part')
63 if left_digits == 0 and right_digits == 0:
64 raise ValueError(
65 'A float number cannot have less than 0 digits in total')
66 if None not in (min_value, max_value) and min_value > max_value:
67 raise ValueError('Min value cannot be greater than max value')
68 if None not in (min_value, max_value) and min_value == max_value:
69 raise ValueError('Min and max value cannot be the same')
70 if positive and min_value is not None and min_value <= 0:
71 raise ValueError(
72 'Cannot combine positive=True with negative or zero min_value')
73 if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:
74 raise ValueError('Max value must fit within left digits')
75 if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:
76 raise ValueError('Min value must fit within left digits')
77
78 # Make sure at least either left or right is set
79 if left_digits is None and right_digits is None:
80 needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))
81 right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)
82
83 # If only one side is set, choose #digits for other side
84 if (left_digits is None) ^ (right_digits is None):
85 if left_digits is None:
86 left_digits = max(1, sys.float_info.dig - right_digits)
87 else:
88 right_digits = max(1, sys.float_info.dig - left_digits)
89
90 # Make sure we don't ask for too many digits!
91 if left_digits + right_digits > sys.float_info.dig:
92 raise ValueError(
93 f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '
94 f'{sys.float_info.dig})',
95 )
96
97 sign = ''
98 if (min_value is not None) or (max_value is not None):
99 # Make sure left_digits still respected
100 if left_digits is not None:
101 if max_value is None:
102 max_value = 10 ** left_digits # minus smallest representable, adjusted later
103 if min_value is None:
104 min_value = -(10 ** left_digits) # plus smallest representable, adjusted later
105
106 if max_value is not None and max_value < 0:
107 max_value += 1 # as the random_int will be generated up to max_value - 1
108 if min_value is not None and min_value < 0:
109 min_value += 1 # as we then append digits after the left_number
110 left_number = self._safe_random_int(
111 min_value, max_value, positive,
112 )
113 else:
114 sign = '+' if positive else self.random_element(('+', '-'))
115 left_number = self.random_number(left_digits)
116
117 result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')
118 if positive and result == 0:
119 if right_digits:
120 result = float('0.' + '0' * (right_digits - 1) + '1')
121 else:
122 result += sys.float_info.epsilon
123
124 if right_digits:
125 result = min(result, 10 ** left_digits - float(f'0.{"0" * (right_digits - 1)}1'))
126 result = max(result, -(10 ** left_digits + float(f'0.{"0" * (right_digits - 1)}1')))
127 else:
128 result = min(result, 10 ** left_digits - 1)
129 result = max(result, -(10 ** left_digits + 1))
130
131 return result
132
133 def _safe_random_int(self, min_value, max_value, positive):
134 orig_min_value = min_value
135 orig_max_value = max_value
136
137 if min_value is None:
138 min_value = max_value - self.random_int()
139 if max_value is None:
140 max_value = min_value + self.random_int()
141 if positive:
142 min_value = max(min_value, 0)
143
144 if min_value == max_value:
145 return self._safe_random_int(orig_min_value, orig_max_value, positive)
146 else:
147 return self.random_int(min_value, max_value - 1)
148
149 def pyint(self, min_value=0, max_value=9999, step=1):
150 return self.generator.random_int(min_value, max_value, step=step)
151
152 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
153 min_value=None, max_value=None):
154
155 float_ = self.pyfloat(
156 left_digits, right_digits, positive, min_value, max_value)
157 return Decimal(str(float_))
158
159 def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
160 return tuple(
161 self._pyiterable(
162 nb_elements,
163 variable_nb_elements,
164 value_types,
165 *allowed_types))
166
167 def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
168 return set(
169 self._pyiterable(
170 nb_elements,
171 variable_nb_elements,
172 value_types,
173 *allowed_types))
174
175 def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
176 return list(
177 self._pyiterable(
178 nb_elements,
179 variable_nb_elements,
180 value_types,
181 *allowed_types))
182
183 def pyiterable(
184 self,
185 nb_elements=10,
186 variable_nb_elements=True,
187 value_types=None,
188 *allowed_types):
189 value_types = self._check_signature(value_types, allowed_types)
190 return self.random_element([self.pylist, self.pytuple, self.pyset])(
191 nb_elements, variable_nb_elements, value_types, *allowed_types)
192
193 def _random_type(self, type_list):
194 value_type = self.random_element(type_list)
195
196 method_name = f'py{value_type}'
197 if hasattr(self, method_name):
198 value_type = method_name
199
200 return self.generator.format(value_type)
201
202 def _pyiterable(
203 self,
204 nb_elements=10,
205 variable_nb_elements=True,
206 value_types=None,
207 *allowed_types):
208
209 value_types = self._check_signature(value_types, allowed_types)
210
211 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
212 for t in value_types
213 # avoid recursion
214 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
215 if not value_types:
216 value_types = self.default_value_types
217
218 if variable_nb_elements:
219 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
220
221 for _ in range(nb_elements):
222 yield self._random_type(value_types)
223
224 def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
225 """
226 Returns a dictionary.
227
228 :nb_elements: number of elements for dictionary
229 :variable_nb_elements: is use variable number of elements for dictionary
230 :value_types: type of dictionary values
231 """
232 if variable_nb_elements:
233 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
234
235 return dict(zip(
236 self.generator.words(nb_elements, unique=True),
237 self._pyiterable(nb_elements, False, value_types, *allowed_types),
238 ))
239
240 def pystruct(self, count=10, value_types=None, *allowed_types):
241 value_types = self._check_signature(value_types, allowed_types)
242
243 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
244 for t in value_types
245 # avoid recursion
246 if t != 'struct']
247 if not value_types:
248 value_types = self.default_value_types
249
250 types = []
251 d = {}
252 nd = {}
253 for i in range(count):
254 d[self.generator.word()] = self._random_type(value_types)
255 types.append(self._random_type(value_types))
256 nd[self.generator.word()] = {i: self._random_type(value_types),
257 i + 1: [self._random_type(value_types),
258 self._random_type(value_types),
259 self._random_type(value_types)],
260 i + 2: {i: self._random_type(value_types),
261 i + 1: self._random_type(value_types),
262 i + 2: [self._random_type(value_types),
263 self._random_type(value_types)]}}
264 return types, d, nd
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -151,10 +151,58 @@
def pydecimal(self, left_digits=None, right_digits=None, positive=False,
min_value=None, max_value=None):
+ if left_digits is not None and left_digits < 0:
+ raise ValueError(
+ 'A decimal number cannot have less than 0 digits in its '
+ 'integer part')
+ if right_digits is not None and right_digits < 0:
+ raise ValueError(
+ 'A decimal number cannot have less than 0 digits in its '
+ 'fractional part')
+ if (left_digits is not None and left_digits == 0) and (right_digits is not None and right_digits == 0):
+ raise ValueError(
+ 'A decimal number cannot have 0 digits in total')
+ if None not in (min_value, max_value) and min_value > max_value:
+ raise ValueError('Min value cannot be greater than max value')
+ if None not in (min_value, max_value) and min_value == max_value:
+ raise ValueError('Min and max value cannot be the same')
+ if positive and min_value is not None and min_value <= 0:
+ raise ValueError(
+ 'Cannot combine positive=True with negative or zero min_value')
+ if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:
+ raise ValueError('Max value must fit within left digits')
+ if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:
+ raise ValueError('Min value must fit within left digits')
+
+ # if either left or right digits are not specified we randomly choose a length
+ max_random_digits = 100
+ minimum_left_digits = len(str(min_value)) if min_value is not None else 1
+ if left_digits is None and right_digits is None:
+ right_digits = self.random_int(1, max_random_digits)
+ left_digits = self.random_int(minimum_left_digits, max_random_digits)
+ if left_digits is not None and right_digits is None:
+ right_digits = self.random_int(1, max_random_digits)
+ if left_digits is None and right_digits is not None:
+ left_digits = self.random_int(minimum_left_digits, max_random_digits)
- float_ = self.pyfloat(
- left_digits, right_digits, positive, min_value, max_value)
- return Decimal(str(float_))
+ sign = ''
+ left_number = ''.join([str(self.random_digit()) for i in range(0, left_digits)]) or '0'
+ if right_digits is not None:
+ right_number = ''.join([str(self.random_digit()) for i in range(0, right_digits)])
+ else:
+ right_number = ''
+ sign = '+' if positive else self.random_element(('+', '-'))
+
+ result = Decimal(f'{sign}{left_number}.{right_number}')
+
+ # Because the random result might have the same number of decimals as max_value the random number
+ # might be above max_value or below min_value
+ if max_value is not None and result > max_value:
+ result = max_value
+ if min_value is not None and result < min_value:
+ result = min_value
+
+ return result
def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
return tuple(
|
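A standalone sketch of the strategy the golden diff above adopts: assemble the decimal digit-by-digit as text and hand it to `Decimal`, so the number of left digits is no longer capped by `sys.float_info.dig`. Python's `random` stands in for Faker's seeded generator, and the function name is illustrative.

```python
import random
from decimal import Decimal


def pydecimal_sketch(left_digits=16, right_digits=2, positive=False):
    """Build a Decimal without routing through float.

    Because the value is constructed as a string, left_digits may exceed
    the ~15 significant digits a float can represent.
    """
    left = "".join(str(random.randint(0, 9)) for _ in range(left_digits)) or "0"
    right = "".join(str(random.randint(0, 9)) for _ in range(right_digits))
    sign = "+" if positive else random.choice("+-")
    return Decimal(f"{sign}{left}.{right}")


print(pydecimal_sketch(left_digits=16))  # 16 integer digits, no ValueError
```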
{"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -151,10 +151,58 @@\n \n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n+ if left_digits is not None and left_digits < 0:\n+ raise ValueError(\n+ 'A decimal number cannot have less than 0 digits in its '\n+ 'integer part')\n+ if right_digits is not None and right_digits < 0:\n+ raise ValueError(\n+ 'A decimal number cannot have less than 0 digits in its '\n+ 'fractional part')\n+ if (left_digits is not None and left_digits == 0) and (right_digits is not None and right_digits == 0):\n+ raise ValueError(\n+ 'A decimal number cannot have 0 digits in total')\n+ if None not in (min_value, max_value) and min_value > max_value:\n+ raise ValueError('Min value cannot be greater than max value')\n+ if None not in (min_value, max_value) and min_value == max_value:\n+ raise ValueError('Min and max value cannot be the same')\n+ if positive and min_value is not None and min_value <= 0:\n+ raise ValueError(\n+ 'Cannot combine positive=True with negative or zero min_value')\n+ if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n+ raise ValueError('Max value must fit within left digits')\n+ if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n+ raise ValueError('Min value must fit within left digits')\n+\n+ # if either left or right digits are not specified we randomly choose a length\n+ max_random_digits = 100\n+ minimum_left_digits = len(str(min_value)) if min_value is not None else 1\n+ if left_digits is None and right_digits is None:\n+ right_digits = self.random_int(1, max_random_digits)\n+ left_digits = self.random_int(minimum_left_digits, max_random_digits)\n+ if left_digits is not None and right_digits is None:\n+ right_digits = self.random_int(1, max_random_digits)\n+ if left_digits is None and right_digits is not None:\n+ left_digits = self.random_int(minimum_left_digits, max_random_digits)\n \n- float_ = self.pyfloat(\n- left_digits, right_digits, positive, min_value, max_value)\n- return Decimal(str(float_))\n+ sign = ''\n+ left_number = ''.join([str(self.random_digit()) for i in range(0, left_digits)]) or '0'\n+ if right_digits is not None:\n+ right_number = ''.join([str(self.random_digit()) for i in range(0, right_digits)])\n+ else:\n+ right_number = ''\n+ sign = '+' if positive else self.random_element(('+', '-'))\n+\n+ result = Decimal(f'{sign}{left_number}.{right_number}')\n+\n+ # Because the random result might have the same number of decimals as max_value the random number\n+ # might be above max_value or below min_value\n+ if max_value is not None and result > max_value:\n+ result = max_value\n+ if min_value is not None and result < min_value:\n+ result = min_value\n+\n+ return result\n \n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n", "issue": "pydecimal unnecessarily limited by float's max digits\n* Faker version: master at time of writing https://github.com/joke2k/faker/commit/d9f4b00b9134e6dfbb09cc1caa81c912b79c3c7c\r\n* OS: Linux\r\n\r\nPython's `Decimal` can be arbitrarily many digits; with default precision 28. Faker's `pydecimal` uses `pyfloat`, and so gets limited to `sys.float_info.dig`, which is appropriate for `pyfloat` but not really relevant for `pydecimal`. 
(The Decimal context could even be less than that.)\r\n\r\n### Steps to reproduce\r\n\r\n1. `pydecimal(left_digits=16)`\r\n\r\n### Expected behavior\r\n\r\nGet a 16 digit Decimal\r\n\r\n### Actual behavior\r\n\r\n> ValueError: Asking for too many digits (16 + 0 == 16 > 15)\r\n\npydecimal unnecessarily limited by float's max digits\n* Faker version: master at time of writing https://github.com/joke2k/faker/commit/d9f4b00b9134e6dfbb09cc1caa81c912b79c3c7c\r\n* OS: Linux\r\n\r\nPython's `Decimal` can be arbitrarily many digits; with default precision 28. Faker's `pydecimal` uses `pyfloat`, and so gets limited to `sys.float_info.dig`, which is appropriate for `pyfloat` but not really relevant for `pydecimal`. (The Decimal context could even be less than that.)\r\n\r\n### Steps to reproduce\r\n\r\n1. `pydecimal(left_digits=16)`\r\n\r\n### Expected behavior\r\n\r\nGet a 16 digit Decimal\r\n\r\n### Actual behavior\r\n\r\n> ValueError: Asking for too many digits (16 + 0 == 16 > 15)\r\n\n", "before_files": [{"content": "import math\nimport string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n raise ValueError('Max value must fit within left digits')\n if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n raise ValueError('Min value must fit within left digits')\n\n # Make sure at least either left or right is set\n if left_digits is None and right_digits is None:\n needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))\n right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)\n\n # If only one side is set, choose #digits for other side\n if (left_digits is None) ^ (right_digits is None):\n if left_digits is None:\n left_digits = max(1, sys.float_info.dig - right_digits)\n else:\n right_digits = max(1, sys.float_info.dig - left_digits)\n\n # Make sure we don't ask for too many digits!\n if left_digits + right_digits > sys.float_info.dig:\n raise ValueError(\n f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '\n f'{sys.float_info.dig})',\n )\n\n sign = ''\n if (min_value is not None) or (max_value is not None):\n # Make sure left_digits still respected\n if left_digits is not None:\n if max_value is None:\n max_value = 10 ** left_digits # minus smallest representable, adjusted later\n if min_value is None:\n min_value = -(10 ** left_digits) # plus smallest representable, adjusted later\n\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' 
+ '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n\n if right_digits:\n result = min(result, 10 ** left_digits - float(f'0.{\"0\" * (right_digits - 1)}1'))\n result = max(result, -(10 ** left_digits + float(f'0.{\"0\" * (right_digits - 1)}1')))\n else:\n result = min(result, 10 ** left_digits - 1)\n result = max(result, -(10 ** left_digits + 1))\n\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def 
pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}], "after_files": [{"content": "import math\nimport string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n raise ValueError('Max value must fit within left digits')\n if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n raise ValueError('Min value must fit within left digits')\n\n # Make sure at least either left or right is set\n if left_digits is None and right_digits is None:\n needed_left_digits = max(1, math.ceil(math.log10(max(abs(max_value or 1), abs(min_value or 1)))))\n right_digits = self.random_int(1, sys.float_info.dig - needed_left_digits)\n\n # If only one side is set, choose #digits for other side\n if (left_digits is None) ^ (right_digits is None):\n if left_digits is None:\n left_digits = max(1, sys.float_info.dig - right_digits)\n else:\n right_digits = max(1, sys.float_info.dig - left_digits)\n\n # Make sure we don't ask for too many digits!\n if left_digits + right_digits > sys.float_info.dig:\n raise ValueError(\n f'Asking for too many digits ({left_digits} + {right_digits} == {left_digits + right_digits} > '\n f'{sys.float_info.dig})',\n )\n\n sign = ''\n if (min_value is not None) or (max_value is not None):\n # Make sure left_digits still respected\n if left_digits is not None:\n if max_value is None:\n max_value = 10 ** left_digits # minus smallest representable, adjusted later\n if min_value is None:\n min_value = -(10 ** left_digits) # plus smallest representable, adjusted later\n\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' 
+ '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n\n if right_digits:\n result = min(result, 10 ** left_digits - float(f'0.{\"0\" * (right_digits - 1)}1'))\n result = max(result, -(10 ** left_digits + float(f'0.{\"0\" * (right_digits - 1)}1')))\n else:\n result = min(result, 10 ** left_digits - 1)\n result = max(result, -(10 ** left_digits + 1))\n\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A decimal number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A decimal number cannot have less than 0 digits in its '\n 'fractional part')\n if (left_digits is not None and left_digits == 0) and (right_digits is not None and right_digits == 0):\n raise ValueError(\n 'A decimal number cannot have 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n if left_digits is not None and max_value and math.ceil(math.log10(abs(max_value))) > left_digits:\n raise ValueError('Max value must fit within left digits')\n if left_digits is not None and min_value and math.ceil(math.log10(abs(min_value))) > left_digits:\n raise ValueError('Min value must fit within left digits')\n\n # if either left or right digits are not specified we randomly choose a length\n max_random_digits = 100\n minimum_left_digits = len(str(min_value)) if min_value is not None else 1\n if left_digits is None and right_digits is None:\n right_digits = self.random_int(1, max_random_digits)\n left_digits = self.random_int(minimum_left_digits, max_random_digits)\n if left_digits is not None and right_digits is None:\n right_digits = self.random_int(1, max_random_digits)\n if left_digits is None and right_digits is not None:\n left_digits = self.random_int(minimum_left_digits, max_random_digits)\n\n sign = ''\n left_number = ''.join([str(self.random_digit()) for i in range(0, left_digits)]) or '0'\n if right_digits is not None:\n right_number = ''.join([str(self.random_digit()) for i in range(0, right_digits)])\n else:\n right_number = ''\n sign = '+' if positive else self.random_element(('+', '-'))\n\n result = Decimal(f'{sign}{left_number}.{right_number}')\n\n # Because the random result might have the same number of decimals as max_value the random number\n # might be above max_value or below min_value\n if max_value is not None and result > max_value:\n result = max_value\n if min_value is not None and result < min_value:\n 
result = min_value\n\n return result\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]}
| 3,716 | 810 |
gh_patches_debug_38068 | rasdani/github-patches | git_diff | networkx__networkx-4589 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation and code in `floyd_warshall_numpy` are inconsistent
### Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
Using `floyd_warshall_numpy` with a specified set of nodes will only find paths that are confined to that subset of nodes. I'm not sure I agree with that choice, and certainly the documentation does not make it clear.
### Expected Behavior
<!--- Tell us what should happen -->
Based on the documentation, I would expect it to find a path that starts at one node and ends at another, even if that path must go through additional nodes not in the provided list.
### Steps to Reproduce
<!--- Provide a minimal example that reproduces the bug -->
https://stackoverflow.com/q/65771537/2966723
### Environment
<!--- Please provide details about your local environment -->
Python version: 3.9
NetworkX version: 2.5
### Additional context
<!--- Add any other context about the problem here, screenshots, etc. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/shortest_paths/dense.py`
Content:
```
1 """Floyd-Warshall algorithm for shortest paths.
2 """
3 import networkx as nx
4
5 __all__ = [
6 "floyd_warshall",
7 "floyd_warshall_predecessor_and_distance",
8 "reconstruct_path",
9 "floyd_warshall_numpy",
10 ]
11
12
13 def floyd_warshall_numpy(G, nodelist=None, weight="weight"):
14 """Find all-pairs shortest path lengths using Floyd's algorithm.
15
16 Parameters
17 ----------
18 G : NetworkX graph
19
20 nodelist : list, optional
21 The rows and columns are ordered by the nodes in nodelist.
22 If nodelist is None then the ordering is produced by G.nodes().
23
24 weight: string, optional (default= 'weight')
25 Edge data key corresponding to the edge weight.
26
27 Returns
28 -------
29 distance : NumPy matrix
30 A matrix of shortest path distances between nodes.
31 If there is no path between to nodes the corresponding matrix entry
32 will be Inf.
33
34 Notes
35 -----
36 Floyd's algorithm is appropriate for finding shortest paths in
37 dense graphs or graphs with negative weights when Dijkstra's
38 algorithm fails. This algorithm can still fail if there are negative
39 cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
40 """
41 import numpy as np
42
43 # To handle cases when an edge has weight=0, we must make sure that
44 # nonedges are not given the value 0 as well.
45 A = nx.to_numpy_array(
46 G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
47 )
48 n, m = A.shape
49 np.fill_diagonal(A, 0) # diagonal elements should be zero
50 for i in range(n):
51 # The second term has the same shape as A due to broadcasting
52 A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])
53 return A
54
55
56 def floyd_warshall_predecessor_and_distance(G, weight="weight"):
57 """Find all-pairs shortest path lengths using Floyd's algorithm.
58
59 Parameters
60 ----------
61 G : NetworkX graph
62
63 weight: string, optional (default= 'weight')
64 Edge data key corresponding to the edge weight.
65
66 Returns
67 -------
68 predecessor,distance : dictionaries
69 Dictionaries, keyed by source and target, of predecessors and distances
70 in the shortest path.
71
72 Examples
73 --------
74 >>> G = nx.DiGraph()
75 >>> G.add_weighted_edges_from(
76 ... [
77 ... ("s", "u", 10),
78 ... ("s", "x", 5),
79 ... ("u", "v", 1),
80 ... ("u", "x", 2),
81 ... ("v", "y", 1),
82 ... ("x", "u", 3),
83 ... ("x", "v", 5),
84 ... ("x", "y", 2),
85 ... ("y", "s", 7),
86 ... ("y", "v", 6),
87 ... ]
88 ... )
89 >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G)
90 >>> print(nx.reconstruct_path("s", "v", predecessors))
91 ['s', 'x', 'u', 'v']
92
93 Notes
94 -----
95 Floyd's algorithm is appropriate for finding shortest paths
96 in dense graphs or graphs with negative weights when Dijkstra's algorithm
97 fails. This algorithm can still fail if there are negative cycles.
98 It has running time $O(n^3)$ with running space of $O(n^2)$.
99
100 See Also
101 --------
102 floyd_warshall
103 floyd_warshall_numpy
104 all_pairs_shortest_path
105 all_pairs_shortest_path_length
106 """
107 from collections import defaultdict
108
109 # dictionary-of-dictionaries representation for dist and pred
110 # use some defaultdict magick here
111 # for dist the default is the floating point inf value
112 dist = defaultdict(lambda: defaultdict(lambda: float("inf")))
113 for u in G:
114 dist[u][u] = 0
115 pred = defaultdict(dict)
116 # initialize path distance dictionary to be the adjacency matrix
117 # also set the distance to self to 0 (zero diagonal)
118 undirected = not G.is_directed()
119 for u, v, d in G.edges(data=True):
120 e_weight = d.get(weight, 1.0)
121 dist[u][v] = min(e_weight, dist[u][v])
122 pred[u][v] = u
123 if undirected:
124 dist[v][u] = min(e_weight, dist[v][u])
125 pred[v][u] = v
126 for w in G:
127 dist_w = dist[w] # save recomputation
128 for u in G:
129 dist_u = dist[u] # save recomputation
130 for v in G:
131 d = dist_u[w] + dist_w[v]
132 if dist_u[v] > d:
133 dist_u[v] = d
134 pred[u][v] = pred[w][v]
135 return dict(pred), dict(dist)
136
137
138 def reconstruct_path(source, target, predecessors):
139 """Reconstruct a path from source to target using the predecessors
140 dict as returned by floyd_warshall_predecessor_and_distance
141
142 Parameters
143 ----------
144 source : node
145 Starting node for path
146
147 target : node
148 Ending node for path
149
150 predecessors: dictionary
151 Dictionary, keyed by source and target, of predecessors in the
152 shortest path, as returned by floyd_warshall_predecessor_and_distance
153
154 Returns
155 -------
156 path : list
157 A list of nodes containing the shortest path from source to target
158
159 If source and target are the same, an empty list is returned
160
161 Notes
162 -----
163 This function is meant to give more applicability to the
164 floyd_warshall_predecessor_and_distance function
165
166 See Also
167 --------
168 floyd_warshall_predecessor_and_distance
169 """
170 if source == target:
171 return []
172 prev = predecessors[source]
173 curr = prev[target]
174 path = [target, curr]
175 while curr != source:
176 curr = prev[curr]
177 path.append(curr)
178 return list(reversed(path))
179
180
181 def floyd_warshall(G, weight="weight"):
182 """Find all-pairs shortest path lengths using Floyd's algorithm.
183
184 Parameters
185 ----------
186 G : NetworkX graph
187
188 weight: string, optional (default= 'weight')
189 Edge data key corresponding to the edge weight.
190
191
192 Returns
193 -------
194 distance : dict
195 A dictionary, keyed by source and target, of shortest paths distances
196 between nodes.
197
198 Notes
199 -----
200 Floyd's algorithm is appropriate for finding shortest paths
201 in dense graphs or graphs with negative weights when Dijkstra's algorithm
202 fails. This algorithm can still fail if there are negative cycles.
203 It has running time $O(n^3)$ with running space of $O(n^2)$.
204
205 See Also
206 --------
207 floyd_warshall_predecessor_and_distance
208 floyd_warshall_numpy
209 all_pairs_shortest_path
210 all_pairs_shortest_path_length
211 """
212 # could make this its own function to reduce memory costs
213 return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/algorithms/shortest_paths/dense.py b/networkx/algorithms/shortest_paths/dense.py
--- a/networkx/algorithms/shortest_paths/dense.py
+++ b/networkx/algorithms/shortest_paths/dense.py
@@ -13,37 +13,57 @@
def floyd_warshall_numpy(G, nodelist=None, weight="weight"):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
+ This algorithm for finding shortest paths takes advantage of
+ matrix representations of a graph and works well for dense
+ graphs where all-pairs shortest path lengths are desired.
+ The results are returned as a NumPy array, distance[i, j],
+ where i and j are the indexes of two nodes in nodelist.
+ The entry distance[i, j] is the distance along a shortest
+ path from i to j. If no path exists the distance is Inf.
+
Parameters
----------
G : NetworkX graph
- nodelist : list, optional
+ nodelist : list, optional (default=G.nodes)
The rows and columns are ordered by the nodes in nodelist.
- If nodelist is None then the ordering is produced by G.nodes().
+ If nodelist is None then the ordering is produced by G.nodes.
+ Nodelist should include all nodes in G.
- weight: string, optional (default= 'weight')
+ weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : NumPy matrix
A matrix of shortest path distances between nodes.
- If there is no path between to nodes the corresponding matrix entry
- will be Inf.
+ If there is no path between two nodes the value is Inf.
Notes
-----
Floyd's algorithm is appropriate for finding shortest paths in
dense graphs or graphs with negative weights when Dijkstra's
algorithm fails. This algorithm can still fail if there are negative
- cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
+ cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
+
+ Raises
+ ------
+ NetworkXError
+ If nodelist is not a list of the nodes in G.
"""
import numpy as np
+ if nodelist is not None:
+ if not (len(nodelist) == len(G) == len(set(nodelist))):
+ raise nx.NetworkXError(
+ "nodelist must contain every node in G with no repeats."
+ "If you wanted a subgraph of G use G.subgraph(nodelist)"
+ )
+
# To handle cases when an edge has weight=0, we must make sure that
# nonedges are not given the value 0 as well.
A = nx.to_numpy_array(
- G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
+ G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
)
n, m = A.shape
np.fill_diagonal(A, 0) # diagonal elements should be zero
|
{"golden_diff": "diff --git a/networkx/algorithms/shortest_paths/dense.py b/networkx/algorithms/shortest_paths/dense.py\n--- a/networkx/algorithms/shortest_paths/dense.py\n+++ b/networkx/algorithms/shortest_paths/dense.py\n@@ -13,37 +13,57 @@\n def floyd_warshall_numpy(G, nodelist=None, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n \n+ This algorithm for finding shortest paths takes advantage of\n+ matrix representations of a graph and works well for dense\n+ graphs where all-pairs shortest path lengths are desired.\n+ The results are returned as a NumPy array, distance[i, j],\n+ where i and j are the indexes of two nodes in nodelist.\n+ The entry distance[i, j] is the distance along a shortest\n+ path from i to j. If no path exists the distance is Inf.\n+\n Parameters\n ----------\n G : NetworkX graph\n \n- nodelist : list, optional\n+ nodelist : list, optional (default=G.nodes)\n The rows and columns are ordered by the nodes in nodelist.\n- If nodelist is None then the ordering is produced by G.nodes().\n+ If nodelist is None then the ordering is produced by G.nodes.\n+ Nodelist should include all nodes in G.\n \n- weight: string, optional (default= 'weight')\n+ weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n \n Returns\n -------\n distance : NumPy matrix\n A matrix of shortest path distances between nodes.\n- If there is no path between to nodes the corresponding matrix entry\n- will be Inf.\n+ If there is no path between two nodes the value is Inf.\n \n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths in\n dense graphs or graphs with negative weights when Dijkstra's\n algorithm fails. This algorithm can still fail if there are negative\n- cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.\n+ cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.\n+\n+ Raises\n+ ------\n+ NetworkXError\n+ If nodelist is not a list of the nodes in G.\n \"\"\"\n import numpy as np\n \n+ if nodelist is not None:\n+ if not (len(nodelist) == len(G) == len(set(nodelist))):\n+ raise nx.NetworkXError(\n+ \"nodelist must contain every node in G with no repeats.\"\n+ \"If you wanted a subgraph of G use G.subgraph(nodelist)\"\n+ )\n+\n # To handle cases when an edge has weight=0, we must make sure that\n # nonedges are not given the value 0 as well.\n A = nx.to_numpy_array(\n- G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf\n+ G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf\n )\n n, m = A.shape\n np.fill_diagonal(A, 0) # diagonal elements should be zero\n", "issue": "Documentation and code in `floyd_warshall_numpy` are inconsistent\n### Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n\r\nUsing `floyd_warshall_numpy` with a specified set of nodes will only find paths that are confined to that subset of nodes. 
I'm not sure I agree with that choice, and certainly the documentation does not make it clear.\r\n\r\n### Expected Behavior\r\n<!--- Tell us what should happen -->\r\n\r\nBased on the documentation, I would expect it to find a path that starts at one node and ends at another, even if that path must go through additional nodes not in the provided list.\r\n\r\n### Steps to Reproduce\r\n<!--- Provide a minimal example that reproduces the bug -->\r\n\r\nhttps://stackoverflow.com/q/65771537/2966723\r\n\r\n### Environment\r\n<!--- Please provide details about your local environment -->\r\nPython version: 3.9\r\nNetworkX version: 2.5\r\n\r\n\r\n### Additional context\r\n<!--- Add any other context about the problem here, screenshots, etc. -->\r\n\n", "before_files": [{"content": "\"\"\"Floyd-Warshall algorithm for shortest paths.\n\"\"\"\nimport networkx as nx\n\n__all__ = [\n \"floyd_warshall\",\n \"floyd_warshall_predecessor_and_distance\",\n \"reconstruct_path\",\n \"floyd_warshall_numpy\",\n]\n\n\ndef floyd_warshall_numpy(G, nodelist=None, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n Parameters\n ----------\n G : NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered by the nodes in nodelist.\n If nodelist is None then the ordering is produced by G.nodes().\n\n weight: string, optional (default= 'weight')\n Edge data key corresponding to the edge weight.\n\n Returns\n -------\n distance : NumPy matrix\n A matrix of shortest path distances between nodes.\n If there is no path between to nodes the corresponding matrix entry\n will be Inf.\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths in\n dense graphs or graphs with negative weights when Dijkstra's\n algorithm fails. This algorithm can still fail if there are negative\n cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.\n \"\"\"\n import numpy as np\n\n # To handle cases when an edge has weight=0, we must make sure that\n # nonedges are not given the value 0 as well.\n A = nx.to_numpy_array(\n G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf\n )\n n, m = A.shape\n np.fill_diagonal(A, 0) # diagonal elements should be zero\n for i in range(n):\n # The second term has the same shape as A due to broadcasting\n A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])\n return A\n\n\ndef floyd_warshall_predecessor_and_distance(G, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight: string, optional (default= 'weight')\n Edge data key corresponding to the edge weight.\n\n Returns\n -------\n predecessor,distance : dictionaries\n Dictionaries, keyed by source and target, of predecessors and distances\n in the shortest path.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_weighted_edges_from(\n ... [\n ... (\"s\", \"u\", 10),\n ... (\"s\", \"x\", 5),\n ... (\"u\", \"v\", 1),\n ... (\"u\", \"x\", 2),\n ... (\"v\", \"y\", 1),\n ... (\"x\", \"u\", 3),\n ... (\"x\", \"v\", 5),\n ... (\"x\", \"y\", 2),\n ... (\"y\", \"s\", 7),\n ... (\"y\", \"v\", 6),\n ... ]\n ... )\n >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G)\n >>> print(nx.reconstruct_path(\"s\", \"v\", predecessors))\n ['s', 'x', 'u', 'v']\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths\n in dense graphs or graphs with negative weights when Dijkstra's algorithm\n fails. 
This algorithm can still fail if there are negative cycles.\n It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n See Also\n --------\n floyd_warshall\n floyd_warshall_numpy\n all_pairs_shortest_path\n all_pairs_shortest_path_length\n \"\"\"\n from collections import defaultdict\n\n # dictionary-of-dictionaries representation for dist and pred\n # use some defaultdict magick here\n # for dist the default is the floating point inf value\n dist = defaultdict(lambda: defaultdict(lambda: float(\"inf\")))\n for u in G:\n dist[u][u] = 0\n pred = defaultdict(dict)\n # initialize path distance dictionary to be the adjacency matrix\n # also set the distance to self to 0 (zero diagonal)\n undirected = not G.is_directed()\n for u, v, d in G.edges(data=True):\n e_weight = d.get(weight, 1.0)\n dist[u][v] = min(e_weight, dist[u][v])\n pred[u][v] = u\n if undirected:\n dist[v][u] = min(e_weight, dist[v][u])\n pred[v][u] = v\n for w in G:\n dist_w = dist[w] # save recomputation\n for u in G:\n dist_u = dist[u] # save recomputation\n for v in G:\n d = dist_u[w] + dist_w[v]\n if dist_u[v] > d:\n dist_u[v] = d\n pred[u][v] = pred[w][v]\n return dict(pred), dict(dist)\n\n\ndef reconstruct_path(source, target, predecessors):\n \"\"\"Reconstruct a path from source to target using the predecessors\n dict as returned by floyd_warshall_predecessor_and_distance\n\n Parameters\n ----------\n source : node\n Starting node for path\n\n target : node\n Ending node for path\n\n predecessors: dictionary\n Dictionary, keyed by source and target, of predecessors in the\n shortest path, as returned by floyd_warshall_predecessor_and_distance\n\n Returns\n -------\n path : list\n A list of nodes containing the shortest path from source to target\n\n If source and target are the same, an empty list is returned\n\n Notes\n -----\n This function is meant to give more applicability to the\n floyd_warshall_predecessor_and_distance function\n\n See Also\n --------\n floyd_warshall_predecessor_and_distance\n \"\"\"\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))\n\n\ndef floyd_warshall(G, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight: string, optional (default= 'weight')\n Edge data key corresponding to the edge weight.\n\n\n Returns\n -------\n distance : dict\n A dictionary, keyed by source and target, of shortest paths distances\n between nodes.\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths\n in dense graphs or graphs with negative weights when Dijkstra's algorithm\n fails. 
This algorithm can still fail if there are negative cycles.\n It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n See Also\n --------\n floyd_warshall_predecessor_and_distance\n floyd_warshall_numpy\n all_pairs_shortest_path\n all_pairs_shortest_path_length\n \"\"\"\n # could make this its own function to reduce memory costs\n return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]\n", "path": "networkx/algorithms/shortest_paths/dense.py"}], "after_files": [{"content": "\"\"\"Floyd-Warshall algorithm for shortest paths.\n\"\"\"\nimport networkx as nx\n\n__all__ = [\n \"floyd_warshall\",\n \"floyd_warshall_predecessor_and_distance\",\n \"reconstruct_path\",\n \"floyd_warshall_numpy\",\n]\n\n\ndef floyd_warshall_numpy(G, nodelist=None, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n This algorithm for finding shortest paths takes advantage of\n matrix representations of a graph and works well for dense\n graphs where all-pairs shortest path lengths are desired.\n The results are returned as a NumPy array, distance[i, j],\n where i and j are the indexes of two nodes in nodelist.\n The entry distance[i, j] is the distance along a shortest\n path from i to j. If no path exists the distance is Inf.\n\n Parameters\n ----------\n G : NetworkX graph\n\n nodelist : list, optional (default=G.nodes)\n The rows and columns are ordered by the nodes in nodelist.\n If nodelist is None then the ordering is produced by G.nodes.\n Nodelist should include all nodes in G.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n\n Returns\n -------\n distance : NumPy matrix\n A matrix of shortest path distances between nodes.\n If there is no path between two nodes the value is Inf.\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths in\n dense graphs or graphs with negative weights when Dijkstra's\n algorithm fails. This algorithm can still fail if there are negative\n cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n Raises\n ------\n NetworkXError\n If nodelist is not a list of the nodes in G.\n \"\"\"\n import numpy as np\n\n if nodelist is not None:\n if not (len(nodelist) == len(G) == len(set(nodelist))):\n raise nx.NetworkXError(\n \"nodelist must contain every node in G with no repeats.\"\n \"If you wanted a subgraph of G use G.subgraph(nodelist)\"\n )\n\n # To handle cases when an edge has weight=0, we must make sure that\n # nonedges are not given the value 0 as well.\n A = nx.to_numpy_array(\n G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf\n )\n n, m = A.shape\n np.fill_diagonal(A, 0) # diagonal elements should be zero\n for i in range(n):\n # The second term has the same shape as A due to broadcasting\n A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])\n return A\n\n\ndef floyd_warshall_predecessor_and_distance(G, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight: string, optional (default= 'weight')\n Edge data key corresponding to the edge weight.\n\n Returns\n -------\n predecessor,distance : dictionaries\n Dictionaries, keyed by source and target, of predecessors and distances\n in the shortest path.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_weighted_edges_from(\n ... [\n ... (\"s\", \"u\", 10),\n ... (\"s\", \"x\", 5),\n ... (\"u\", \"v\", 1),\n ... (\"u\", \"x\", 2),\n ... 
(\"v\", \"y\", 1),\n ... (\"x\", \"u\", 3),\n ... (\"x\", \"v\", 5),\n ... (\"x\", \"y\", 2),\n ... (\"y\", \"s\", 7),\n ... (\"y\", \"v\", 6),\n ... ]\n ... )\n >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G)\n >>> print(nx.reconstruct_path(\"s\", \"v\", predecessors))\n ['s', 'x', 'u', 'v']\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths\n in dense graphs or graphs with negative weights when Dijkstra's algorithm\n fails. This algorithm can still fail if there are negative cycles.\n It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n See Also\n --------\n floyd_warshall\n floyd_warshall_numpy\n all_pairs_shortest_path\n all_pairs_shortest_path_length\n \"\"\"\n from collections import defaultdict\n\n # dictionary-of-dictionaries representation for dist and pred\n # use some defaultdict magick here\n # for dist the default is the floating point inf value\n dist = defaultdict(lambda: defaultdict(lambda: float(\"inf\")))\n for u in G:\n dist[u][u] = 0\n pred = defaultdict(dict)\n # initialize path distance dictionary to be the adjacency matrix\n # also set the distance to self to 0 (zero diagonal)\n undirected = not G.is_directed()\n for u, v, d in G.edges(data=True):\n e_weight = d.get(weight, 1.0)\n dist[u][v] = min(e_weight, dist[u][v])\n pred[u][v] = u\n if undirected:\n dist[v][u] = min(e_weight, dist[v][u])\n pred[v][u] = v\n for w in G:\n dist_w = dist[w] # save recomputation\n for u in G:\n dist_u = dist[u] # save recomputation\n for v in G:\n d = dist_u[w] + dist_w[v]\n if dist_u[v] > d:\n dist_u[v] = d\n pred[u][v] = pred[w][v]\n return dict(pred), dict(dist)\n\n\ndef reconstruct_path(source, target, predecessors):\n \"\"\"Reconstruct a path from source to target using the predecessors\n dict as returned by floyd_warshall_predecessor_and_distance\n\n Parameters\n ----------\n source : node\n Starting node for path\n\n target : node\n Ending node for path\n\n predecessors: dictionary\n Dictionary, keyed by source and target, of predecessors in the\n shortest path, as returned by floyd_warshall_predecessor_and_distance\n\n Returns\n -------\n path : list\n A list of nodes containing the shortest path from source to target\n\n If source and target are the same, an empty list is returned\n\n Notes\n -----\n This function is meant to give more applicability to the\n floyd_warshall_predecessor_and_distance function\n\n See Also\n --------\n floyd_warshall_predecessor_and_distance\n \"\"\"\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))\n\n\ndef floyd_warshall(G, weight=\"weight\"):\n \"\"\"Find all-pairs shortest path lengths using Floyd's algorithm.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight: string, optional (default= 'weight')\n Edge data key corresponding to the edge weight.\n\n\n Returns\n -------\n distance : dict\n A dictionary, keyed by source and target, of shortest paths distances\n between nodes.\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths\n in dense graphs or graphs with negative weights when Dijkstra's algorithm\n fails. 
This algorithm can still fail if there are negative cycles.\n It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n See Also\n --------\n floyd_warshall_predecessor_and_distance\n floyd_warshall_numpy\n all_pairs_shortest_path\n all_pairs_shortest_path_length\n \"\"\"\n # could make this its own function to reduce memory costs\n return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]\n", "path": "networkx/algorithms/shortest_paths/dense.py"}]}
| 2,634 | 712 |
gh_patches_debug_21555 | rasdani/github-patches | git_diff | getpelican__pelican-845 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conflicts rendering Category pages when category is not defined in consistent case
I was testing a jinja macro that dealt with creating links for categories.
I noted that if you define a category in one article as `Category: Something` and in another article as `Category: something` that these are treated as separate categories, however, when your category page is rendered, there is only the lowecase url, e.g. `category/something.html`. This will only associate with the articles with meta data defined as `Category: something` and not anywhere where it is defined with uppercase since there is no `category/Something.html`.
I am not sure if making this case insensitive would break code. Certainly, it would be unclear when printing the category name which case to use. From an intelligent template process, you would set you case using CSS style attribute to be sure it was the way you want, and it could always render categories in lower case.
Otherwise, it might just be sufficient to put this into the documentation. I always tend to capitalize by categories, but some people might not notice and wonder why some articles are missing. I have not yet tested this, but I would imagine the same issue exists for tags.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/urlwrappers.py`
Content:
```
1 import os
2 import functools
3 import logging
4
5 import six
6
7 from pelican.utils import (slugify, python_2_unicode_compatible)
8
9 logger = logging.getLogger(__name__)
10
11
12 @python_2_unicode_compatible
13 @functools.total_ordering
14 class URLWrapper(object):
15 def __init__(self, name, settings):
16 self.name = name
17 self.slug = slugify(self.name)
18 self.settings = settings
19
20 def as_dict(self):
21 return self.__dict__
22
23 def __hash__(self):
24 return hash(self.name)
25
26 def _key(self):
27 return self.name
28
29 def _normalize_key(self, key):
30 return six.text_type(key)
31
32 def __eq__(self, other):
33 return self._key() == self._normalize_key(other)
34
35 def __ne__(self, other):
36 return self._key() != self._normalize_key(other)
37
38 def __lt__(self, other):
39 return self._key() < self._normalize_key(other)
40
41 def __str__(self):
42 return self.name
43
44 def __repr__(self):
45 return '<{} {}>'.format(type(self).__name__, str(self))
46
47 def _from_settings(self, key, get_page_name=False):
48 """Returns URL information as defined in settings.
49
50 When get_page_name=True returns URL without anything after {slug} e.g.
51 if in settings: CATEGORY_URL="cat/{slug}.html" this returns
52 "cat/{slug}" Useful for pagination.
53
54 """
55 setting = "%s_%s" % (self.__class__.__name__.upper(), key)
56 value = self.settings[setting]
57 if not isinstance(value, six.string_types):
58 logger.warning('%s is set to %s' % (setting, value))
59 return value
60 else:
61 if get_page_name:
62 return os.path.splitext(value)[0].format(**self.as_dict())
63 else:
64 return value.format(**self.as_dict())
65
66 page_name = property(functools.partial(_from_settings, key='URL',
67 get_page_name=True))
68 url = property(functools.partial(_from_settings, key='URL'))
69 save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
70
71
72 class Category(URLWrapper):
73 pass
74
75
76 class Tag(URLWrapper):
77 def __init__(self, name, *args, **kwargs):
78 super(Tag, self).__init__(name.strip(), *args, **kwargs)
79
80
81 class Author(URLWrapper):
82 pass
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -13,21 +13,35 @@
@functools.total_ordering
class URLWrapper(object):
def __init__(self, name, settings):
+ # next 2 lines are redundant with the setter of the name property
+ # but are here for clarity
+ self._name = name
+ self.slug = slugify(name)
self.name = name
- self.slug = slugify(self.name)
self.settings = settings
+ @property
+ def name(self):
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ self._name = name
+ self.slug = slugify(name)
+
def as_dict(self):
- return self.__dict__
+ d = self.__dict__
+ d['name'] = self.name
+ return d
def __hash__(self):
- return hash(self.name)
+ return hash(self.slug)
def _key(self):
- return self.name
+ return self.slug
def _normalize_key(self, key):
- return six.text_type(key)
+ return six.text_type(slugify(key))
def __eq__(self, other):
return self._key() == self._normalize_key(other)
|
{"golden_diff": "diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py\n--- a/pelican/urlwrappers.py\n+++ b/pelican/urlwrappers.py\n@@ -13,21 +13,35 @@\n @functools.total_ordering\n class URLWrapper(object):\n def __init__(self, name, settings):\n+ # next 2 lines are redundant with the setter of the name property\n+ # but are here for clarity\n+ self._name = name\n+ self.slug = slugify(name)\n self.name = name\n- self.slug = slugify(self.name)\n self.settings = settings\n \n+ @property\n+ def name(self):\n+ return self._name\n+\n+ @name.setter\n+ def name(self, name):\n+ self._name = name\n+ self.slug = slugify(name)\n+\n def as_dict(self):\n- return self.__dict__\n+ d = self.__dict__\n+ d['name'] = self.name\n+ return d\n \n def __hash__(self):\n- return hash(self.name)\n+ return hash(self.slug)\n \n def _key(self):\n- return self.name\n+ return self.slug\n \n def _normalize_key(self, key):\n- return six.text_type(key)\n+ return six.text_type(slugify(key))\n \n def __eq__(self, other):\n return self._key() == self._normalize_key(other)\n", "issue": "Conflicts rendering Category pages when category is not defined in consistent case\nI was testing a jinja macro that dealt with creating links for categories.\n\nI noted that if you define a category in one article as `Category: Something` and in another article as `Category: something` that these are treated as separate categories, however, when your category page is rendered, there is only the lowecase url, e.g. `category/something.html`. This will only associate with the articles with meta data defined as `Category: something` and not anywhere where it is defined with uppercase since there is no `category/Something.html`.\n\nI am not sure if making this case insensitive would break code. Certainly, it would be unclear when printing the category name which case to use. From an intelligent template process, you would set you case using CSS style attribute to be sure it was the way you want, and it could always render categories in lower case.\n\nOtherwise, it might just be sufficient to put this into the documentation. I always tend to capitalize by categories, but some people might not notice and wonder why some articles are missing. 
I have not yet tested this, but I would imagine the same issue exists for tags.\n\n", "before_files": [{"content": "import os\nimport functools\nimport logging\n\nimport six\n\nfrom pelican.utils import (slugify, python_2_unicode_compatible)\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n self.name = name\n self.slug = slugify(self.name)\n self.settings = settings\n\n def as_dict(self):\n return self.__dict__\n\n def __hash__(self):\n return hash(self.name)\n\n def _key(self):\n return self.name\n\n def _normalize_key(self, key):\n return six.text_type(key)\n\n def __eq__(self, other):\n return self._key() == self._normalize_key(other)\n\n def __ne__(self, other):\n return self._key() != self._normalize_key(other)\n\n def __lt__(self, other):\n return self._key() < self._normalize_key(other)\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, str(self))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s' % (setting, value))\n return value\n else:\n if get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n pass\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n\n\nclass Author(URLWrapper):\n pass\n", "path": "pelican/urlwrappers.py"}], "after_files": [{"content": "import os\nimport functools\nimport logging\n\nimport six\n\nfrom pelican.utils import (slugify, python_2_unicode_compatible)\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n # next 2 lines are redundant with the setter of the name property\n # but are here for clarity\n self._name = name\n self.slug = slugify(name)\n self.name = name\n self.settings = settings\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n self.slug = slugify(name)\n\n def as_dict(self):\n d = self.__dict__\n d['name'] = self.name\n return d\n\n def __hash__(self):\n return hash(self.slug)\n\n def _key(self):\n return self.slug\n\n def _normalize_key(self, key):\n return six.text_type(slugify(key))\n\n def __eq__(self, other):\n return self._key() == self._normalize_key(other)\n\n def __ne__(self, other):\n return self._key() != self._normalize_key(other)\n\n def __lt__(self, other):\n return self._key() < self._normalize_key(other)\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, str(self))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When 
get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s' % (setting, value))\n return value\n else:\n if get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n pass\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n\n\nclass Author(URLWrapper):\n pass\n", "path": "pelican/urlwrappers.py"}]}
| 1,201 | 320 |
gh_patches_debug_3713
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-948
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Context fails when a particular value is present in any register
### Description
If a particular value is present in any register, ctx throws
```
Cannot access memory at address 0x7ffffffff000
```
It seems to happen at the 128TB split, but curiously only with unaligned addresses that would cause a qword read to cross 128TB:
128TB-{1..7} throws, but neither 128TB-8 nor 128TB does.
Full backtrace
```
Traceback (most recent call last):
File "/opt/pwndbg/pwndbg/commands/__init__.py", line 130, in __call__
return self.function(*args, **kwargs)
File "/opt/pwndbg/pwndbg/commands/__init__.py", line 221, in _OnlyWhenRunning
return function(*a, **kw)
File "/opt/pwndbg/pwndbg/commands/context.py", line 269, in context
result[target].extend(func(target=out,
File "/opt/pwndbg/pwndbg/commands/context.py", line 350, in context_regs
regs = get_regs()
File "/opt/pwndbg/pwndbg/commands/context.py", line 405, in get_regs
desc = pwndbg.chain.format(value)
File "/opt/pwndbg/pwndbg/chain.py", line 112, in format
enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
File "/opt/pwndbg/pwndbg/enhance.py", line 109, in enhance
intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))
gdb.MemoryError: Cannot access memory at address 0x7ffffffff000
```
### Steps to reproduce
```asm
.globl _start
_start:
mov $0x7fffffffeff8, %rdx # no err
mov $0x7fffffffeffa, %rdx # err
mov $0x7fffffffefff, %rdx # err
mov $0x7ffffffff000, %rdx # no err
int3
```
```sh
as test.s -o test.o ; ld -e _start test.o -o test
```
### My setup
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation, help us improve pwndbg and support other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
Platform: Linux-5.13.9_1-x86_64-with-glibc2.32
Gdb: 10.2
Python: 3.9.6 (default, Jul 6 2021, 18:29:50) [GCC 10.2.1 20201203]
Pwndbg: 1.1.0 build: b9e7bf1
Capstone: 4.0.1024
Unicorn: 1.0.3
This GDB was configured as follows:
configure --host=x86_64-unknown-linux-gnu --target=x86_64-unknown-linux-gnu
--with-auto-load-dir=$debugdir:$datadir/auto-load
--with-auto-load-safe-path=$debugdir:$datadir/auto-load
--with-expat
--with-gdb-datadir=/usr/share/gdb (relocatable)
--with-jit-reader-dir=/usr/lib64/gdb (relocatable)
--without-libunwind-ia64
--with-lzma
--without-babeltrace
--without-intel-pt
--without-mpfr
--without-xxhash
--with-python=/usr (relocatable)
--with-python-libdir=/usr/lib (relocatable)
--with-debuginfod
--without-guile
--disable-source-highlight
--with-separate-debug-dir=/usr/lib64/debug (relocatable)
--with-system-gdbinit=/etc/gdb/gdbinit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/enhance.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Given an address in memory which does not contain a pointer elsewhere
5 into memory, attempt to describe the data as best as possible.
6
7 Currently prints out code, integers, or strings, in a best-effort manner
8 dependent on page permissions, the contents of the data, and any
9 supplemental information sources (e.g. active IDA Pro connection).
10 """
11
12 import string
13
14 import gdb
15
16 import pwndbg.arch
17 import pwndbg.color as color
18 import pwndbg.color.enhance as E
19 import pwndbg.config
20 import pwndbg.disasm
21 import pwndbg.memoize
22 import pwndbg.memory
23 import pwndbg.strings
24 import pwndbg.symbol
25 import pwndbg.typeinfo
26 from pwndbg.color.syntax_highlight import syntax_highlight
27
28 bad_instrs = [
29 '.byte',
30 '.long',
31 'rex.R',
32 'rex.XB',
33 '.inst',
34 '(bad)'
35 ]
36
37 def good_instr(i):
38 return not any(bad in i for bad in bad_instrs)
39
40 def int_str(value):
41 retval = '%#x' % int(value & pwndbg.arch.ptrmask)
42
43 # Try to unpack the value as a string
44 packed = pwndbg.arch.pack(int(value))
45 if all(c in string.printable.encode('utf-8') for c in packed):
46 if len(retval) > 4:
47 retval = '%s (%r)' % (retval, str(packed.decode('ascii', 'ignore')))
48
49 return retval
50
51
52 # @pwndbg.memoize.reset_on_stop
53 def enhance(value, code = True):
54 """
55 Given the last pointer in a chain, attempt to characterize
56
57 Note that 'the last pointer in a chain' may not at all actually be a pointer.
58
59 Additionally, optimizations are made based on various sources of data for
60 'value'. For example, if it is set to RWX, we try to get information on whether
61 it resides on the stack, or in a RW section that *happens* to be RWX, to
62 determine which order to print the fields.
63
64 Arguments:
65 value(obj): Value to enhance
66 code(bool): Hint that indicates the value may be an instruction
67 """
68 value = int(value)
69
70 name = pwndbg.symbol.get(value) or None
71 page = pwndbg.vmmap.find(value)
72
73 # If it's not in a page we know about, try to dereference
74 # it anyway just to test.
75 can_read = True
76 if not page or None == pwndbg.memory.peek(value):
77 can_read = False
78
79 if not can_read:
80 return E.integer(int_str(value))
81
82 # It's mapped memory, or we can at least read it.
83 # Try to find out if it's a string.
84 instr = None
85 exe = page and page.execute
86 rwx = page and page.rwx
87
88 # For the purpose of following pointers, don't display
89 # anything on the stack or heap as 'code'
90 if '[stack' in page.objfile or '[heap' in page.objfile:
91 rwx = exe = False
92
93 # If IDA doesn't think it's in a function, don't display it as code.
94 if pwndbg.ida.available() and not pwndbg.ida.GetFunctionName(value):
95 rwx = exe = False
96
97 if exe:
98 instr = pwndbg.disasm.one(value)
99 if instr:
100 instr = "%-6s %s" % (instr.mnemonic, instr.op_str)
101 if pwndbg.config.syntax_highlight:
102 instr = syntax_highlight(instr)
103
104 szval = pwndbg.strings.get(value) or None
105 szval0 = szval
106 if szval:
107 szval = E.string(repr(szval))
108
109 intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))
110 intval0 = intval
111 if 0 <= intval < 10:
112 intval = E.integer(str(intval))
113 else:
114 intval = E.integer('%#x' % int(intval & pwndbg.arch.ptrmask))
115
116 retval = []
117
118 # print([instr,intval0,szval])
119 if not code:
120 instr = None
121
122 # If it's on the stack, don't display it as code in a chain.
123 if instr and 'stack' in page.objfile:
124 retval = [intval, szval]
125
126
127
128 # If it's RWX but a small value, don't display it as code in a chain.
129 elif instr and rwx and intval0 < 0x1000:
130 retval = [intval, szval]
131
132 # If it's an instruction and *not* RWX, display it unconditionally
133 elif instr and exe:
134 if not rwx:
135 if szval:
136 retval = [instr, szval]
137 else:
138 retval = [instr]
139 else:
140 retval = [instr, intval, szval]
141
142 # Otherwise strings have preference
143 elif szval:
144 if len(szval0) < pwndbg.arch.ptrsize:
145 retval = [intval, szval]
146 else:
147 retval = [szval]
148
149 # And then integer
150 else:
151 return E.integer(int_str(intval0))
152
153 retval = tuple(filter(lambda x: x is not None, retval))
154
155 if len(retval) == 0:
156 return E.unknown("???")
157
158 if len(retval) == 1:
159 return retval[0]
160
161 return retval[0] + E.comment(color.strip(' /* {} */'.format('; '.join(retval[1:]))))
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/enhance.py b/pwndbg/enhance.py
--- a/pwndbg/enhance.py
+++ b/pwndbg/enhance.py
@@ -106,6 +106,10 @@
if szval:
szval = E.string(repr(szval))
+ # Fix for case we can't read the end address anyway (#946)
+ if value + pwndbg.arch.ptrsize > page.end:
+ return E.integer(int_str(value))
+
intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))
intval0 = intval
if 0 <= intval < 10:
|
{"golden_diff": "diff --git a/pwndbg/enhance.py b/pwndbg/enhance.py\n--- a/pwndbg/enhance.py\n+++ b/pwndbg/enhance.py\n@@ -106,6 +106,10 @@\n if szval:\n szval = E.string(repr(szval))\n \n+ # Fix for case we can't read the end address anyway (#946)\n+ if value + pwndbg.arch.ptrsize > page.end:\n+ return E.integer(int_str(value))\n+\n intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))\n intval0 = intval\n if 0 <= intval < 10:\n", "issue": "Context fails when a particular value is present in any register\n### Description\r\nIf a particular value is present in any register ctx throws\r\n```\r\nCannot access memory at address 0x7ffffffff000\r\n```\r\nIt seems to happen at 128TB split but curiously only with unaligned addresses that would cause a qword read to cross 128TB,\r\nso 128TB-{1..7} throws but neither does 128TB-8 or 128TB\r\n\r\nFull backtrace\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/pwndbg/pwndbg/commands/__init__.py\", line 130, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/opt/pwndbg/pwndbg/commands/__init__.py\", line 221, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/opt/pwndbg/pwndbg/commands/context.py\", line 269, in context\r\n result[target].extend(func(target=out,\r\n File \"/opt/pwndbg/pwndbg/commands/context.py\", line 350, in context_regs\r\n regs = get_regs()\r\n File \"/opt/pwndbg/pwndbg/commands/context.py\", line 405, in get_regs\r\n desc = pwndbg.chain.format(value)\r\n File \"/opt/pwndbg/pwndbg/chain.py\", line 112, in format\r\n enhanced = pwndbg.enhance.enhance(chain[-1], code=code)\r\n File \"/opt/pwndbg/pwndbg/enhance.py\", line 109, in enhance\r\n intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))\r\ngdb.MemoryError: Cannot access memory at address 0x7ffffffff000\r\n```\r\n### Steps to reproduce\r\n\r\n```asm\r\n.globl _start\r\n_start:\r\n mov $0x7fffffffeff8, %rdx # no err\r\n mov $0x7fffffffeffa, %rdx # err\r\n mov $0x7fffffffefff, %rdx # err\r\n mov $0x7ffffffff000, %rdx # no err\r\n int3\r\n```\r\n```sh\r\nas test.s -o test.o ; ld -e _start test.o -o test\r\n```\r\n\r\n### My setup\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. 
on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n\r\nPlatform: Linux-5.13.9_1-x86_64-with-glibc2.32\r\nGdb: 10.2\r\nPython: 3.9.6 (default, Jul 6 2021, 18:29:50) [GCC 10.2.1 20201203]\r\nPwndbg: 1.1.0 build: b9e7bf1\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.3\r\nThis GDB was configured as follows:\r\n configure --host=x86_64-unknown-linux-gnu --target=x86_64-unknown-linux-gnu\r\n --with-auto-load-dir=$debugdir:$datadir/auto-load\r\n --with-auto-load-safe-path=$debugdir:$datadir/auto-load\r\n --with-expat\r\n --with-gdb-datadir=/usr/share/gdb (relocatable)\r\n --with-jit-reader-dir=/usr/lib64/gdb (relocatable)\r\n --without-libunwind-ia64\r\n --with-lzma\r\n --without-babeltrace\r\n --without-intel-pt\r\n --without-mpfr\r\n --without-xxhash\r\n --with-python=/usr (relocatable)\r\n --with-python-libdir=/usr/lib (relocatable)\r\n --with-debuginfod\r\n --without-guile\r\n --disable-source-highlight\r\n --with-separate-debug-dir=/usr/lib64/debug (relocatable)\r\n --with-system-gdbinit=/etc/gdb/gdbinit\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGiven an address in memory which does not contain a pointer elsewhere\ninto memory, attempt to describe the data as best as possible.\n\nCurrently prints out code, integers, or strings, in a best-effort manner\ndependent on page permissions, the contents of the data, and any\nsupplemental information sources (e.g. active IDA Pro connection).\n\"\"\"\n\nimport string\n\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.color as color\nimport pwndbg.color.enhance as E\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.strings\nimport pwndbg.symbol\nimport pwndbg.typeinfo\nfrom pwndbg.color.syntax_highlight import syntax_highlight\n\nbad_instrs = [\n'.byte',\n'.long',\n'rex.R',\n'rex.XB',\n'.inst',\n'(bad)'\n]\n\ndef good_instr(i):\n return not any(bad in i for bad in bad_instrs)\n\ndef int_str(value):\n retval = '%#x' % int(value & pwndbg.arch.ptrmask)\n\n # Try to unpack the value as a string\n packed = pwndbg.arch.pack(int(value))\n if all(c in string.printable.encode('utf-8') for c in packed):\n if len(retval) > 4:\n retval = '%s (%r)' % (retval, str(packed.decode('ascii', 'ignore')))\n\n return retval\n\n\n# @pwndbg.memoize.reset_on_stop\ndef enhance(value, code = True):\n \"\"\"\n Given the last pointer in a chain, attempt to characterize\n\n Note that 'the last pointer in a chain' may not at all actually be a pointer.\n\n Additionally, optimizations are made based on various sources of data for\n 'value'. 
For example, if it is set to RWX, we try to get information on whether\n it resides on the stack, or in a RW section that *happens* to be RWX, to\n determine which order to print the fields.\n\n Arguments:\n value(obj): Value to enhance\n code(bool): Hint that indicates the value may be an instruction\n \"\"\"\n value = int(value)\n\n name = pwndbg.symbol.get(value) or None\n page = pwndbg.vmmap.find(value)\n\n # If it's not in a page we know about, try to dereference\n # it anyway just to test.\n can_read = True\n if not page or None == pwndbg.memory.peek(value):\n can_read = False\n\n if not can_read:\n return E.integer(int_str(value))\n\n # It's mapped memory, or we can at least read it.\n # Try to find out if it's a string.\n instr = None\n exe = page and page.execute\n rwx = page and page.rwx\n\n # For the purpose of following pointers, don't display\n # anything on the stack or heap as 'code'\n if '[stack' in page.objfile or '[heap' in page.objfile:\n rwx = exe = False\n\n # If IDA doesn't think it's in a function, don't display it as code.\n if pwndbg.ida.available() and not pwndbg.ida.GetFunctionName(value):\n rwx = exe = False\n\n if exe:\n instr = pwndbg.disasm.one(value)\n if instr:\n instr = \"%-6s %s\" % (instr.mnemonic, instr.op_str)\n if pwndbg.config.syntax_highlight:\n instr = syntax_highlight(instr)\n\n szval = pwndbg.strings.get(value) or None\n szval0 = szval\n if szval:\n szval = E.string(repr(szval))\n\n intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))\n intval0 = intval\n if 0 <= intval < 10:\n intval = E.integer(str(intval))\n else:\n intval = E.integer('%#x' % int(intval & pwndbg.arch.ptrmask))\n\n retval = []\n\n # print([instr,intval0,szval])\n if not code:\n instr = None\n\n # If it's on the stack, don't display it as code in a chain.\n if instr and 'stack' in page.objfile:\n retval = [intval, szval]\n\n\n\n # If it's RWX but a small value, don't display it as code in a chain.\n elif instr and rwx and intval0 < 0x1000:\n retval = [intval, szval]\n\n # If it's an instruction and *not* RWX, display it unconditionally\n elif instr and exe:\n if not rwx:\n if szval:\n retval = [instr, szval]\n else:\n retval = [instr]\n else:\n retval = [instr, intval, szval]\n\n # Otherwise strings have preference\n elif szval:\n if len(szval0) < pwndbg.arch.ptrsize:\n retval = [intval, szval]\n else:\n retval = [szval]\n\n # And then integer\n else:\n return E.integer(int_str(intval0))\n\n retval = tuple(filter(lambda x: x is not None, retval))\n\n if len(retval) == 0:\n return E.unknown(\"???\")\n\n if len(retval) == 1:\n return retval[0]\n\n return retval[0] + E.comment(color.strip(' /* {} */'.format('; '.join(retval[1:]))))\n", "path": "pwndbg/enhance.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGiven an address in memory which does not contain a pointer elsewhere\ninto memory, attempt to describe the data as best as possible.\n\nCurrently prints out code, integers, or strings, in a best-effort manner\ndependent on page permissions, the contents of the data, and any\nsupplemental information sources (e.g. 
active IDA Pro connection).\n\"\"\"\n\nimport string\n\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.color as color\nimport pwndbg.color.enhance as E\nimport pwndbg.config\nimport pwndbg.disasm\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.strings\nimport pwndbg.symbol\nimport pwndbg.typeinfo\nfrom pwndbg.color.syntax_highlight import syntax_highlight\n\nbad_instrs = [\n'.byte',\n'.long',\n'rex.R',\n'rex.XB',\n'.inst',\n'(bad)'\n]\n\ndef good_instr(i):\n return not any(bad in i for bad in bad_instrs)\n\ndef int_str(value):\n retval = '%#x' % int(value & pwndbg.arch.ptrmask)\n\n # Try to unpack the value as a string\n packed = pwndbg.arch.pack(int(value))\n if all(c in string.printable.encode('utf-8') for c in packed):\n if len(retval) > 4:\n retval = '%s (%r)' % (retval, str(packed.decode('ascii', 'ignore')))\n\n return retval\n\n\n# @pwndbg.memoize.reset_on_stop\ndef enhance(value, code = True):\n \"\"\"\n Given the last pointer in a chain, attempt to characterize\n\n Note that 'the last pointer in a chain' may not at all actually be a pointer.\n\n Additionally, optimizations are made based on various sources of data for\n 'value'. For example, if it is set to RWX, we try to get information on whether\n it resides on the stack, or in a RW section that *happens* to be RWX, to\n determine which order to print the fields.\n\n Arguments:\n value(obj): Value to enhance\n code(bool): Hint that indicates the value may be an instruction\n \"\"\"\n value = int(value)\n\n name = pwndbg.symbol.get(value) or None\n page = pwndbg.vmmap.find(value)\n\n # If it's not in a page we know about, try to dereference\n # it anyway just to test.\n can_read = True\n if not page or None == pwndbg.memory.peek(value):\n can_read = False\n\n if not can_read:\n return E.integer(int_str(value))\n\n # It's mapped memory, or we can at least read it.\n # Try to find out if it's a string.\n instr = None\n exe = page and page.execute\n rwx = page and page.rwx\n\n # For the purpose of following pointers, don't display\n # anything on the stack or heap as 'code'\n if '[stack' in page.objfile or '[heap' in page.objfile:\n rwx = exe = False\n\n # If IDA doesn't think it's in a function, don't display it as code.\n if pwndbg.ida.available() and not pwndbg.ida.GetFunctionName(value):\n rwx = exe = False\n\n if exe:\n instr = pwndbg.disasm.one(value)\n if instr:\n instr = \"%-6s %s\" % (instr.mnemonic, instr.op_str)\n if pwndbg.config.syntax_highlight:\n instr = syntax_highlight(instr)\n\n szval = pwndbg.strings.get(value) or None\n szval0 = szval\n if szval:\n szval = E.string(repr(szval))\n\n # Fix for case we can't read the end address anyway (#946)\n if value + pwndbg.arch.ptrsize > page.end:\n return E.integer(int_str(value))\n\n intval = int(pwndbg.memory.poi(pwndbg.typeinfo.pvoid, value))\n intval0 = intval\n if 0 <= intval < 10:\n intval = E.integer(str(intval))\n else:\n intval = E.integer('%#x' % int(intval & pwndbg.arch.ptrmask))\n\n retval = []\n\n # print([instr,intval0,szval])\n if not code:\n instr = None\n\n # If it's on the stack, don't display it as code in a chain.\n if instr and 'stack' in page.objfile:\n retval = [intval, szval]\n\n\n\n # If it's RWX but a small value, don't display it as code in a chain.\n elif instr and rwx and intval0 < 0x1000:\n retval = [intval, szval]\n\n # If it's an instruction and *not* RWX, display it unconditionally\n elif instr and exe:\n if not rwx:\n if szval:\n retval = [instr, szval]\n else:\n retval = [instr]\n else:\n retval = [instr, intval, szval]\n\n # 
Otherwise strings have preference\n elif szval:\n if len(szval0) < pwndbg.arch.ptrsize:\n retval = [intval, szval]\n else:\n retval = [szval]\n\n # And then integer\n else:\n return E.integer(int_str(intval0))\n\n retval = tuple(filter(lambda x: x is not None, retval))\n\n if len(retval) == 0:\n return E.unknown(\"???\")\n\n if len(retval) == 1:\n return retval[0]\n\n return retval[0] + E.comment(color.strip(' /* {} */'.format('; '.join(retval[1:]))))\n", "path": "pwndbg/enhance.py"}]}
| 2,904 | 153 |
gh_patches_debug_33365
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-370
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate log lines when using IPP
The following code fragment emits log lines to STDERR with the default format style even though no logging is requested. I suspect that `ipyparallel` is doing something dirty.
from parsl import DataFlowKernel
from parsl.configs.local import localIPP as config
dfk = DataFlowKernel(config=config)
dfk.cleanup()
The above code with the minor change of using threads will not emit the log lines.
from parsl import DataFlowKernel
from parsl.configs.local import localThreads as config
dfk = DataFlowKernel(config=config)
dfk.cleanup()
Please help test by running this with the latest parsl code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/dataflow/strategy.py`
Content:
```
1 import logging
2 import time
3 import math
4
5 logger = logging.getLogger(__name__)
6
7
8 class Strategy(object):
9 """FlowControl strategy.
10
11 As a workflow dag is processed by Parsl, new tasks are added and completed
12 asynchronously. Parsl interfaces executors with execution providers to construct
13 scalable executors to handle the variable work-load generated by the
14 workflow. This component is responsible for periodically checking outstanding
15 tasks and available compute capacity and trigger scaling events to match
16 workflow needs.
17
18 Here's a diagram of an executor. An executor consists of blocks, which are usually
19 created by single requests to a Local Resource Manager (LRM) such as slurm,
20 condor, torque, or even AWS API. The blocks could contain several task blocks
21 which are separate instances on workers.
22
23
24 .. code:: python
25
26 |<--min_blocks |<-init_blocks max_blocks-->|
27 +----------------------------------------------------------+
28 | +--------block----------+ +--------block--------+ |
29 executor = | | task task | ... | task task | |
30 | +-----------------------+ +---------------------+ |
31 +----------------------------------------------------------+
32
33 The relevant specification options are:
34 1. min_blocks: Minimum number of blocks to maintain
35 2. init_blocks: number of blocks to provision at initialization of workflow
36 3. max_blocks: Maximum number of blocks that can be active due to one workflow
37
38
39 .. code:: python
40
41 slots = current_capacity * tasks_per_node * nodes_per_block
42
43 active_tasks = pending_tasks + running_tasks
44
45 Parallelism = slots / tasks
46 = [0, 1] (i.e, 0 <= p <= 1)
47
48 For example:
49
50 When p = 0,
51 => compute with the least resources possible.
52 infinite tasks are stacked per slot.
53
54 .. code:: python
55
56 blocks = min_blocks { if active_tasks = 0
57 max(min_blocks, 1) { else
58
59 When p = 1,
60 => compute with the most resources.
61 one task is stacked per slot.
62
63 .. code:: python
64
65 blocks = min ( max_blocks,
66 ceil( active_tasks / slots ) )
67
68
69 When p = 1/2,
70 => We stack upto 2 tasks per slot before we overflow
71 and request a new block
72
73
74 let's say min:init:max = 0:0:4 and task_blocks=2
75 Consider the following example:
76 min_blocks = 0
77 init_blocks = 0
78 max_blocks = 4
79 tasks_per_node = 2
80 nodes_per_block = 1
81
82 In the diagram, X <- task
83
84 at 2 tasks:
85
86 .. code:: python
87
88 +---Block---|
89 | |
90 | X X |
91 |slot slot|
92 +-----------+
93
94 at 5 tasks, we overflow as the capacity of a single block is fully used.
95
96 .. code:: python
97
98 +---Block---| +---Block---|
99 | X X | ----> | |
100 | X X | | X |
101 |slot slot| |slot slot|
102 +-----------+ +-----------+
103
104 """
105
106 def __init__(self, dfk):
107 """Initialize strategy."""
108 self.dfk = dfk
109 self.config = dfk.config
110 self.executors = {}
111 self.max_idletime = 60 * 2 # 2 minutes
112
113 for e in self.dfk.config.executors:
114 self.executors[e.label] = {'idle_since': None, 'config': e.label}
115
116 self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}
117
118 self.strategize = self.strategies[self.config.strategy]
119
120 logger.debug("Scaling strategy: {0}".format(self.config.strategy))
121
122 def _strategy_noop(self, tasks, *args, kind=None, **kwargs):
123 """Do nothing.
124
125 Args:
126 - tasks (task_ids): Not used here.
127
128 KWargs:
129 - kind (Not used)
130 """
131
132 def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
133 """Peek at the DFK and the executors specified.
134
135 We assume here that tasks are not held in a runnable
136 state, and that all tasks from an app would be sent to
137 a single specific executor, i.e tasks cannot be specified
138 to go to one of more executors.
139
140 Args:
141 - tasks (task_ids): Not used here.
142
143 KWargs:
144 - kind (Not used)
145 """
146 # Add logic to check executors
147 # for task in tasks :
148 # if self.dfk.tasks[task]:
149
150 for label, executor in self.dfk.executors.items():
151 if not executor.scaling_enabled:
152 continue
153
154 # Tasks that are either pending completion
155 active_tasks = executor.executor.outstanding
156
157 status = executor.status()
158
159 # FIXME we need to handle case where provider does not define these
160 # FIXME probably more of this logic should be moved to the provider
161 min_blocks = executor.provider.min_blocks
162 max_blocks = executor.provider.max_blocks
163 tasks_per_node = executor.provider.tasks_per_node
164 nodes_per_block = executor.provider.nodes_per_block
165 parallelism = executor.provider.parallelism
166
167 active_blocks = sum([1 for x in status if x in ('RUNNING',
168 'SUBMITTING',
169 'PENDING')])
170 active_slots = active_blocks * tasks_per_node * nodes_per_block
171
172 # import pdb; pdb.set_trace()
173 logger.debug("Tasks:{} Slots:{} Parallelism:{}".format(len(active_tasks),
174 active_slots,
175 parallelism))
176
177 # Case 1
178 # No tasks.
179 if len(active_tasks) == 0:
180 # Case 1a
181 # Fewer blocks that min_blocks
182 if active_blocks <= min_blocks:
183 # Ignore
184 # logger.debug("Strategy: Case.1a")
185 pass
186
187 # Case 1b
188 # More blocks than min_blocks. Scale down
189 else:
190 # We want to make sure that max_idletime is reached
191 # before killing off resources
192 if not self.executors[executor.label]['idle_since']:
193 logger.debug("Strategy: Scale_in, tasks=0 starting kill timer")
194 self.executors[executor.label]['idle_since'] = time.time()
195
196 idle_since = self.executors[executor.label]['idle_since']
197 if (time.time() - idle_since) > self.max_idletime:
198 # We have resources idle for the max duration,
199 # we have to scale_in now.
200 logger.debug("Strategy: Scale_in, tasks=0")
201 executor.scale_in(active_blocks - min_blocks)
202
203 else:
204 pass
205 # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))
206
207 # Case 2
208 # More tasks than the available slots.
209 elif (float(active_slots) / len(active_tasks)) < parallelism:
210 # Case 2a
211 # We have the max blocks possible
212 if active_blocks >= max_blocks:
213 # Ignore since we already have the max nodes
214 # logger.debug("Strategy: Case.2a")
215 pass
216
217 # Case 2b
218 else:
219 # logger.debug("Strategy: Case.2b")
220 excess = math.ceil((len(active_tasks) * parallelism) - active_slots)
221 excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))
222 logger.debug("Requesting {} more blocks".format(excess_blocks))
223 executor.scale_out(excess_blocks)
224
225 elif active_slots == 0 and len(active_tasks) > 0:
226 # Case 4
227 # Check if slots are being lost quickly ?
228 logger.debug("Requesting single slot")
229 executor.scale_out(1)
230 # Case 3
231 # tasks ~ slots
232 else:
233 # logger.debug("Strategy: Case 3")
234 pass
235
236
237 if __name__ == '__main__':
238
239 pass
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py
--- a/parsl/dataflow/strategy.py
+++ b/parsl/dataflow/strategy.py
@@ -116,6 +116,8 @@
self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}
self.strategize = self.strategies[self.config.strategy]
+ self.logger_flag = False
+ self.prior_loghandlers = set(logging.getLogger().handlers)
logger.debug("Scaling strategy: {0}".format(self.config.strategy))
@@ -129,6 +131,20 @@
- kind (Not used)
"""
+ def unset_logging(self):
+ """ Mute newly added handlers to the root level, right after calling executor.status
+ """
+ if self.logger_flag is True:
+ return
+
+ root_logger = logging.getLogger()
+
+ for hndlr in root_logger.handlers:
+ if hndlr not in self.prior_loghandlers:
+ hndlr.setLevel(logging.ERROR)
+
+ self.logger_flag = True
+
def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
"""Peek at the DFK and the executors specified.
@@ -143,9 +159,6 @@
KWargs:
- kind (Not used)
"""
- # Add logic to check executors
- # for task in tasks :
- # if self.dfk.tasks[task]:
for label, executor in self.dfk.executors.items():
if not executor.scaling_enabled:
@@ -155,6 +168,7 @@
active_tasks = executor.executor.outstanding
status = executor.status()
+ self.unset_logging()
# FIXME we need to handle case where provider does not define these
# FIXME probably more of this logic should be moved to the provider
|
{"golden_diff": "diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py\n--- a/parsl/dataflow/strategy.py\n+++ b/parsl/dataflow/strategy.py\n@@ -116,6 +116,8 @@\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n \n self.strategize = self.strategies[self.config.strategy]\n+ self.logger_flag = False\n+ self.prior_loghandlers = set(logging.getLogger().handlers)\n \n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n \n@@ -129,6 +131,20 @@\n - kind (Not used)\n \"\"\"\n \n+ def unset_logging(self):\n+ \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n+ \"\"\"\n+ if self.logger_flag is True:\n+ return\n+\n+ root_logger = logging.getLogger()\n+\n+ for hndlr in root_logger.handlers:\n+ if hndlr not in self.prior_loghandlers:\n+ hndlr.setLevel(logging.ERROR)\n+\n+ self.logger_flag = True\n+\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n \n@@ -143,9 +159,6 @@\n KWargs:\n - kind (Not used)\n \"\"\"\n- # Add logic to check executors\n- # for task in tasks :\n- # if self.dfk.tasks[task]:\n \n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n@@ -155,6 +168,7 @@\n active_tasks = executor.executor.outstanding\n \n status = executor.status()\n+ self.unset_logging()\n \n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n", "issue": "Duplicate log lines when using IPP\nThe following code fragment emits log lines to STDERR with the default format style even though no logging is requested. I suspect that `ipyparallel` is doing something dirty.\r\n\r\n from parsl import DataFlowKernel\r\n from parsl.configs.local import localIPP as config\r\n\r\n dfk = DataFlowKernel(config=config)\r\n dfk.cleanup()\r\n\r\nThe above code with the minor change of using threads will not emit the log lines.\r\n\r\n\r\n from parsl import DataFlowKernel\r\n from parsl.configs.local import localThreads as config\r\n\r\n dfk = DataFlowKernel(config=config)\r\n dfk.cleanup()\r\n\r\n\r\nPlease help test by running this with the latest parsl code. \n", "before_files": [{"content": "import logging\nimport time\nimport math\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. 
min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n # Add logic to check executors\n # for task in tasks :\n # if self.dfk.tasks[task]:\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.executor.outstanding\n\n status = executor.status()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n tasks_per_node = executor.provider.tasks_per_node\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n active_blocks = sum([1 for x in status if x in ('RUNNING',\n 'SUBMITTING',\n 'PENDING')])\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n # import pdb; pdb.set_trace()\n logger.debug(\"Tasks:{} Slots:{} Parallelism:{}\".format(len(active_tasks),\n active_slots,\n parallelism))\n\n # Case 1\n # No tasks.\n if 
len(active_tasks) == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Strategy: Scale_in, tasks=0 starting kill timer\")\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Strategy: Scale_in, tasks=0\")\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / len(active_tasks)) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((len(active_tasks) * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and len(active_tasks) > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}], "after_files": [{"content": "import logging\nimport time\nimport math\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. 
code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n self.logger_flag = False\n self.prior_loghandlers = set(logging.getLogger().handlers)\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def unset_logging(self):\n \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n \"\"\"\n if self.logger_flag is True:\n return\n\n root_logger = logging.getLogger()\n\n for hndlr in root_logger.handlers:\n if hndlr not in self.prior_loghandlers:\n hndlr.setLevel(logging.ERROR)\n\n self.logger_flag = True\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.executor.outstanding\n\n status = executor.status()\n self.unset_logging()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n tasks_per_node = executor.provider.tasks_per_node\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n active_blocks = sum([1 for x in status if x in ('RUNNING',\n 'SUBMITTING',\n 'PENDING')])\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n # import pdb; 
pdb.set_trace()\n logger.debug(\"Tasks:{} Slots:{} Parallelism:{}\".format(len(active_tasks),\n active_slots,\n parallelism))\n\n # Case 1\n # No tasks.\n if len(active_tasks) == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Strategy: Scale_in, tasks=0 starting kill timer\")\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Strategy: Scale_in, tasks=0\")\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / len(active_tasks)) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((len(active_tasks) * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and len(active_tasks) > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}]}
| 2,815 | 433 |
gh_patches_debug_38335
|
rasdani/github-patches
|
git_diff
|
ethereum__consensus-specs-863
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename `Transactions` back to `Operations`
A few of us implementers have been talking about the naming of `Transactions` and believe it is best renamed back to `Operations` to reduce confusion and avoid mistaking `Transactions` for transactions in the classical sense. The only thing that should be known as a `Transaction` is a `Transfer`.
If not, it would be great to know what the reason behind the rename was.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/phase0/state_transition.py`
Content:
```
1 from . import spec
2
3
4 from typing import ( # noqa: F401
5 Any,
6 Callable,
7 List,
8 NewType,
9 Tuple,
10 )
11
12 from .spec import (
13 BeaconState,
14 BeaconBlock,
15 )
16
17
18 def expected_deposit_count(state: BeaconState) -> int:
19 return min(
20 spec.MAX_DEPOSITS,
21 state.latest_eth1_data.deposit_count - state.deposit_index
22 )
23
24
25 def process_transaction_type(state: BeaconState,
26 transactions: List[Any],
27 max_transactions: int,
28 tx_fn: Callable[[BeaconState, Any], None]) -> None:
29 assert len(transactions) <= max_transactions
30 for transaction in transactions:
31 tx_fn(state, transaction)
32
33
34 def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
35 process_transaction_type(
36 state,
37 block.body.proposer_slashings,
38 spec.MAX_PROPOSER_SLASHINGS,
39 spec.process_proposer_slashing,
40 )
41
42 process_transaction_type(
43 state,
44 block.body.attester_slashings,
45 spec.MAX_ATTESTER_SLASHINGS,
46 spec.process_attester_slashing,
47 )
48
49 process_transaction_type(
50 state,
51 block.body.attestations,
52 spec.MAX_ATTESTATIONS,
53 spec.process_attestation,
54 )
55
56 assert len(block.body.deposits) == expected_deposit_count(state)
57 process_transaction_type(
58 state,
59 block.body.deposits,
60 spec.MAX_DEPOSITS,
61 spec.process_deposit,
62 )
63
64 process_transaction_type(
65 state,
66 block.body.voluntary_exits,
67 spec.MAX_VOLUNTARY_EXITS,
68 spec.process_voluntary_exit,
69 )
70
71 assert len(block.body.transfers) == len(set(block.body.transfers))
72 process_transaction_type(
73 state,
74 block.body.transfers,
75 spec.MAX_TRANSFERS,
76 spec.process_transfer,
77 )
78
79
80 def process_block(state: BeaconState,
81 block: BeaconBlock,
82 verify_state_root: bool=False) -> None:
83 spec.process_block_header(state, block)
84 spec.process_randao(state, block)
85 spec.process_eth1_data(state, block)
86
87 process_transactions(state, block)
88 if verify_state_root:
89 spec.verify_block_state_root(state, block)
90
91
92 def process_epoch_transition(state: BeaconState) -> None:
93 spec.update_justification_and_finalization(state)
94 spec.process_crosslinks(state)
95 spec.maybe_reset_eth1_period(state)
96 spec.apply_rewards(state)
97 spec.process_ejections(state)
98 spec.update_registry(state)
99 spec.process_slashings(state)
100 spec.process_exit_queue(state)
101 spec.finish_epoch_update(state)
102
103
104 def state_transition(state: BeaconState,
105 block: BeaconBlock,
106 verify_state_root: bool=False) -> BeaconState:
107 while state.slot < block.slot:
108 spec.cache_state(state)
109 if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
110 process_epoch_transition(state)
111 spec.advance_slot(state)
112 if block.slot == state.slot:
113 process_block(state, block, verify_state_root)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py
--- a/utils/phase0/state_transition.py
+++ b/utils/phase0/state_transition.py
@@ -22,31 +22,31 @@
)
-def process_transaction_type(state: BeaconState,
- transactions: List[Any],
- max_transactions: int,
- tx_fn: Callable[[BeaconState, Any], None]) -> None:
- assert len(transactions) <= max_transactions
- for transaction in transactions:
- tx_fn(state, transaction)
+def process_operation_type(state: BeaconState,
+ operations: List[Any],
+ max_operations: int,
+ tx_fn: Callable[[BeaconState, Any], None]) -> None:
+ assert len(operations) <= max_operations
+ for operation in operations:
+ tx_fn(state, operation)
-def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
- process_transaction_type(
+def process_operations(state: BeaconState, block: BeaconBlock) -> None:
+ process_operation_type(
state,
block.body.proposer_slashings,
spec.MAX_PROPOSER_SLASHINGS,
spec.process_proposer_slashing,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.attester_slashings,
spec.MAX_ATTESTER_SLASHINGS,
spec.process_attester_slashing,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.attestations,
spec.MAX_ATTESTATIONS,
@@ -54,14 +54,14 @@
)
assert len(block.body.deposits) == expected_deposit_count(state)
- process_transaction_type(
+ process_operation_type(
state,
block.body.deposits,
spec.MAX_DEPOSITS,
spec.process_deposit,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.voluntary_exits,
spec.MAX_VOLUNTARY_EXITS,
@@ -69,7 +69,7 @@
)
assert len(block.body.transfers) == len(set(block.body.transfers))
- process_transaction_type(
+ process_operation_type(
state,
block.body.transfers,
spec.MAX_TRANSFERS,
@@ -84,7 +84,7 @@
spec.process_randao(state, block)
spec.process_eth1_data(state, block)
- process_transactions(state, block)
+ process_operations(state, block)
if verify_state_root:
spec.verify_block_state_root(state, block)
|
{"golden_diff": "diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py\n--- a/utils/phase0/state_transition.py\n+++ b/utils/phase0/state_transition.py\n@@ -22,31 +22,31 @@\n )\n \n \n-def process_transaction_type(state: BeaconState,\n- transactions: List[Any],\n- max_transactions: int,\n- tx_fn: Callable[[BeaconState, Any], None]) -> None:\n- assert len(transactions) <= max_transactions\n- for transaction in transactions:\n- tx_fn(state, transaction)\n+def process_operation_type(state: BeaconState,\n+ operations: List[Any],\n+ max_operations: int,\n+ tx_fn: Callable[[BeaconState, Any], None]) -> None:\n+ assert len(operations) <= max_operations\n+ for operation in operations:\n+ tx_fn(state, operation)\n \n \n-def process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n- process_transaction_type(\n+def process_operations(state: BeaconState, block: BeaconBlock) -> None:\n+ process_operation_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n@@ -54,14 +54,14 @@\n )\n \n assert len(block.body.deposits) == expected_deposit_count(state)\n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n@@ -69,7 +69,7 @@\n )\n \n assert len(block.body.transfers) == len(set(block.body.transfers))\n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n@@ -84,7 +84,7 @@\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n \n- process_transactions(state, block)\n+ process_operations(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n", "issue": "Rename `Transactions` back to `Operations`\nA few of us implementers have been talking about the naming of `Transactions` and believe it is best renamed back to `Operations` to lower confusion and potentially mistaking `Transactions` with transactions in the classical sense. The only thing that should be known as a `Transaction` is a `Transfer`.\r\n\r\nIf not, it would be great to know what the reason behind the rename was.\r\n\n", "before_files": [{"content": "from . 
import spec\n\n\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n List,\n NewType,\n Tuple,\n)\n\nfrom .spec import (\n BeaconState,\n BeaconBlock,\n)\n\n\ndef expected_deposit_count(state: BeaconState) -> int:\n return min(\n spec.MAX_DEPOSITS,\n state.latest_eth1_data.deposit_count - state.deposit_index\n )\n\n\ndef process_transaction_type(state: BeaconState,\n transactions: List[Any],\n max_transactions: int,\n tx_fn: Callable[[BeaconState, Any], None]) -> None:\n assert len(transactions) <= max_transactions\n for transaction in transactions:\n tx_fn(state, transaction)\n\n\ndef process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n process_transaction_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n\n assert len(block.body.deposits) == expected_deposit_count(state)\n process_transaction_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n\n process_transaction_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_transaction_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n spec.process_transfer,\n )\n\n\ndef process_block(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> None:\n spec.process_block_header(state, block)\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n\n process_transactions(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n\n\ndef process_epoch_transition(state: BeaconState) -> None:\n spec.update_justification_and_finalization(state)\n spec.process_crosslinks(state)\n spec.maybe_reset_eth1_period(state)\n spec.apply_rewards(state)\n spec.process_ejections(state)\n spec.update_registry(state)\n spec.process_slashings(state)\n spec.process_exit_queue(state)\n spec.finish_epoch_update(state)\n\n\ndef state_transition(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> BeaconState:\n while state.slot < block.slot:\n spec.cache_state(state)\n if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:\n process_epoch_transition(state)\n spec.advance_slot(state)\n if block.slot == state.slot:\n process_block(state, block, verify_state_root)\n", "path": "utils/phase0/state_transition.py"}], "after_files": [{"content": "from . 
import spec\n\n\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n List,\n NewType,\n Tuple,\n)\n\nfrom .spec import (\n BeaconState,\n BeaconBlock,\n)\n\n\ndef expected_deposit_count(state: BeaconState) -> int:\n return min(\n spec.MAX_DEPOSITS,\n state.latest_eth1_data.deposit_count - state.deposit_index\n )\n\n\ndef process_operation_type(state: BeaconState,\n operations: List[Any],\n max_operations: int,\n tx_fn: Callable[[BeaconState, Any], None]) -> None:\n assert len(operations) <= max_operations\n for operation in operations:\n tx_fn(state, operation)\n\n\ndef process_operations(state: BeaconState, block: BeaconBlock) -> None:\n process_operation_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n\n process_operation_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n\n process_operation_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n\n assert len(block.body.deposits) == expected_deposit_count(state)\n process_operation_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n\n process_operation_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_operation_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n spec.process_transfer,\n )\n\n\ndef process_block(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> None:\n spec.process_block_header(state, block)\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n\n process_operations(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n\n\ndef process_epoch_transition(state: BeaconState) -> None:\n spec.update_justification_and_finalization(state)\n spec.process_crosslinks(state)\n spec.maybe_reset_eth1_period(state)\n spec.apply_rewards(state)\n spec.process_ejections(state)\n spec.update_registry(state)\n spec.process_slashings(state)\n spec.process_exit_queue(state)\n spec.finish_epoch_update(state)\n\n\ndef state_transition(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> BeaconState:\n while state.slot < block.slot:\n spec.cache_state(state)\n if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:\n process_epoch_transition(state)\n spec.advance_slot(state)\n if block.slot == state.slot:\n process_block(state, block, verify_state_root)\n", "path": "utils/phase0/state_transition.py"}]}
| 1,246 | 569 |
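The `process_operation_type` helper renamed in the diff above is essentially a bounded-list dispatcher: assert the per-block cap, then hand each operation to its handler. Below is a minimal standalone sketch of that pattern, using made-up stand-ins (a plain-dict state, a toy `MAX_DEPOSITS` limit and `process_deposit` handler) rather than the real spec types; the patch itself only changes the transaction/operation naming, not this behaviour.

```python
from typing import Any, Callable, Dict, List


def process_operation_type(state: Dict[str, Any],
                           operations: List[Any],
                           max_operations: int,
                           handler: Callable[[Dict[str, Any], Any], None]) -> None:
    # Reject blocks carrying more operations of this type than the cap allows,
    # then apply the per-operation handler in order.
    assert len(operations) <= max_operations
    for operation in operations:
        handler(state, operation)


# Toy usage with stand-ins for the spec's state, limit and handler.
MAX_DEPOSITS = 16


def process_deposit(state: Dict[str, Any], deposit: Dict[str, int]) -> None:
    state.setdefault("balances", []).append(deposit["amount"])


state: Dict[str, Any] = {}
process_operation_type(state, [{"amount": 32}], MAX_DEPOSITS, process_deposit)
print(state["balances"])  # [32]
```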
gh_patches_debug_30729
|
rasdani/github-patches
|
git_diff
|
wearepal__EthicML-337
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SVM Kernel name
Clearly [this](https://github.com/predictive-analytics-lab/EthicML/blob/f7fcf435b5807ef9931f3ff3b259fc7cc4b38da8/ethicml/algorithms/inprocess/svm.py#L20) is not right
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ethicml/algorithms/inprocess/svm.py`
Content:
```
1 """Wrapper for SKLearn implementation of SVM."""
2 from typing import Optional, Union
3
4 import pandas as pd
5 from sklearn.svm import SVC, LinearSVC
6
7 from ethicml.common import implements
8 from ethicml.utility import DataTuple, Prediction, TestTuple
9
10 from .in_algorithm import InAlgorithm
11
12 __all__ = ["SVM"]
13
14
15 class SVM(InAlgorithm):
16 """Support Vector Machine."""
17
18 def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):
19 """Init SVM."""
20 kernel_name = f" (kernel)" if kernel is not None else ""
21 super().__init__(name="SVM" + kernel_name, is_fairness_algo=False)
22 self.C = SVC().C if C is None else C
23 self.kernel = SVC().kernel if kernel is None else kernel
24
25 @implements(InAlgorithm)
26 def run(self, train: DataTuple, test: Union[DataTuple, TestTuple]) -> Prediction:
27 clf = select_svm(self.C, self.kernel)
28 clf.fit(train.x, train.y.to_numpy().ravel())
29 return Prediction(hard=pd.Series(clf.predict(test.x)))
30
31
32 def select_svm(C: float, kernel: str) -> SVC:
33 """Select the appropriate SVM model for the given parameters."""
34 if kernel == "linear":
35 return LinearSVC(C=C, dual=False, tol=1e-12, random_state=888)
36 return SVC(C=C, kernel=kernel, gamma="auto", random_state=888)
37
```
Path: `ethicml/algorithms/inprocess/logistic_regression.py`
Content:
```
1 """Wrapper around Sci-Kit Learn Logistic Regression."""
2 from typing import Optional
3
4 import pandas as pd
5 from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
6 from sklearn.model_selection import KFold
7
8 from ethicml.common import implements
9 from ethicml.utility import DataTuple, Prediction, SoftPrediction, TestTuple
10
11 from .in_algorithm import InAlgorithm
12
13 __all__ = ["LR", "LRCV", "LRProb"]
14
15
16 class LR(InAlgorithm):
17 """Logistic regression with hard predictions."""
18
19 def __init__(self, C: Optional[float] = None):
20 """Init LR."""
21 self.C = LogisticRegression().C if C is None else C
22 super().__init__(name=f"Logistic Regression, C={self.C}", is_fairness_algo=False)
23
24 @implements(InAlgorithm)
25 def run(self, train: DataTuple, test: TestTuple) -> Prediction:
26 clf = LogisticRegression(solver="liblinear", random_state=888, C=self.C, multi_class="auto")
27 clf.fit(train.x, train.y.to_numpy().ravel())
28 return Prediction(hard=pd.Series(clf.predict(test.x)))
29
30
31 class LRProb(InAlgorithm):
32 """Logistic regression with soft output."""
33
34 def __init__(self, C: Optional[int] = None):
35 """Init LRProb."""
36 self.C = LogisticRegression().C if C is None else C
37 super().__init__(name=f"Logistic Regression Prob, C={self.C}", is_fairness_algo=False)
38
39 @implements(InAlgorithm)
40 def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:
41 clf = LogisticRegression(solver="liblinear", random_state=888, C=self.C, multi_class="auto")
42 clf.fit(train.x, train.y.to_numpy().ravel())
43 return SoftPrediction(soft=pd.Series(clf.predict_proba(test.x)[:, 1]))
44
45
46 class LRCV(InAlgorithm):
47 """Kind of a cheap hack for now, but gives a proper cross-valudeted LR."""
48
49 def __init__(self) -> None:
50 """Init LRCV."""
51 super().__init__(name="LRCV", is_fairness_algo=False)
52
53 @implements(InAlgorithm)
54 def run(self, train: DataTuple, test: TestTuple) -> Prediction:
55 folder = KFold(n_splits=3, shuffle=False)
56 clf = LogisticRegressionCV(
57 cv=folder, n_jobs=-1, random_state=888, solver="liblinear", multi_class="auto"
58 )
59 clf.fit(train.x, train.y.to_numpy().ravel())
60 return Prediction(hard=pd.Series(clf.predict(test.x)), info=dict(C=clf.C_[0]))
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ethicml/algorithms/inprocess/logistic_regression.py b/ethicml/algorithms/inprocess/logistic_regression.py
--- a/ethicml/algorithms/inprocess/logistic_regression.py
+++ b/ethicml/algorithms/inprocess/logistic_regression.py
@@ -19,7 +19,7 @@
def __init__(self, C: Optional[float] = None):
"""Init LR."""
self.C = LogisticRegression().C if C is None else C
- super().__init__(name=f"Logistic Regression, C={self.C}", is_fairness_algo=False)
+ super().__init__(name=f"Logistic Regression (C={self.C})", is_fairness_algo=False)
@implements(InAlgorithm)
def run(self, train: DataTuple, test: TestTuple) -> Prediction:
@@ -34,7 +34,7 @@
def __init__(self, C: Optional[int] = None):
"""Init LRProb."""
self.C = LogisticRegression().C if C is None else C
- super().__init__(name=f"Logistic Regression Prob, C={self.C}", is_fairness_algo=False)
+ super().__init__(name=f"Logistic Regression Prob (C={self.C})", is_fairness_algo=False)
@implements(InAlgorithm)
def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:
diff --git a/ethicml/algorithms/inprocess/svm.py b/ethicml/algorithms/inprocess/svm.py
--- a/ethicml/algorithms/inprocess/svm.py
+++ b/ethicml/algorithms/inprocess/svm.py
@@ -17,7 +17,7 @@
def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):
"""Init SVM."""
- kernel_name = f" (kernel)" if kernel is not None else ""
+ kernel_name = f" ({kernel})" if kernel is not None else ""
super().__init__(name="SVM" + kernel_name, is_fairness_algo=False)
self.C = SVC().C if C is None else C
self.kernel = SVC().kernel if kernel is None else kernel
|
{"golden_diff": "diff --git a/ethicml/algorithms/inprocess/logistic_regression.py b/ethicml/algorithms/inprocess/logistic_regression.py\n--- a/ethicml/algorithms/inprocess/logistic_regression.py\n+++ b/ethicml/algorithms/inprocess/logistic_regression.py\n@@ -19,7 +19,7 @@\n def __init__(self, C: Optional[float] = None):\n \"\"\"Init LR.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n- super().__init__(name=f\"Logistic Regression, C={self.C}\", is_fairness_algo=False)\n+ super().__init__(name=f\"Logistic Regression (C={self.C})\", is_fairness_algo=False)\n \n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n@@ -34,7 +34,7 @@\n def __init__(self, C: Optional[int] = None):\n \"\"\"Init LRProb.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n- super().__init__(name=f\"Logistic Regression Prob, C={self.C}\", is_fairness_algo=False)\n+ super().__init__(name=f\"Logistic Regression Prob (C={self.C})\", is_fairness_algo=False)\n \n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:\ndiff --git a/ethicml/algorithms/inprocess/svm.py b/ethicml/algorithms/inprocess/svm.py\n--- a/ethicml/algorithms/inprocess/svm.py\n+++ b/ethicml/algorithms/inprocess/svm.py\n@@ -17,7 +17,7 @@\n \n def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):\n \"\"\"Init SVM.\"\"\"\n- kernel_name = f\" (kernel)\" if kernel is not None else \"\"\n+ kernel_name = f\" ({kernel})\" if kernel is not None else \"\"\n super().__init__(name=\"SVM\" + kernel_name, is_fairness_algo=False)\n self.C = SVC().C if C is None else C\n self.kernel = SVC().kernel if kernel is None else kernel\n", "issue": "SVM Kernel name\nClearly [this](https://github.com/predictive-analytics-lab/EthicML/blob/f7fcf435b5807ef9931f3ff3b259fc7cc4b38da8/ethicml/algorithms/inprocess/svm.py#L20) is not right \n", "before_files": [{"content": "\"\"\"Wrapper for SKLearn implementation of SVM.\"\"\"\nfrom typing import Optional, Union\n\nimport pandas as pd\nfrom sklearn.svm import SVC, LinearSVC\n\nfrom ethicml.common import implements\nfrom ethicml.utility import DataTuple, Prediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"SVM\"]\n\n\nclass SVM(InAlgorithm):\n \"\"\"Support Vector Machine.\"\"\"\n\n def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):\n \"\"\"Init SVM.\"\"\"\n kernel_name = f\" (kernel)\" if kernel is not None else \"\"\n super().__init__(name=\"SVM\" + kernel_name, is_fairness_algo=False)\n self.C = SVC().C if C is None else C\n self.kernel = SVC().kernel if kernel is None else kernel\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: Union[DataTuple, TestTuple]) -> Prediction:\n clf = select_svm(self.C, self.kernel)\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\ndef select_svm(C: float, kernel: str) -> SVC:\n \"\"\"Select the appropriate SVM model for the given parameters.\"\"\"\n if kernel == \"linear\":\n return LinearSVC(C=C, dual=False, tol=1e-12, random_state=888)\n return SVC(C=C, kernel=kernel, gamma=\"auto\", random_state=888)\n", "path": "ethicml/algorithms/inprocess/svm.py"}, {"content": "\"\"\"Wrapper around Sci-Kit Learn Logistic Regression.\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.model_selection import KFold\n\nfrom ethicml.common import implements\nfrom 
ethicml.utility import DataTuple, Prediction, SoftPrediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"LR\", \"LRCV\", \"LRProb\"]\n\n\nclass LR(InAlgorithm):\n \"\"\"Logistic regression with hard predictions.\"\"\"\n\n def __init__(self, C: Optional[float] = None):\n \"\"\"Init LR.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression, C={self.C}\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\nclass LRProb(InAlgorithm):\n \"\"\"Logistic regression with soft output.\"\"\"\n\n def __init__(self, C: Optional[int] = None):\n \"\"\"Init LRProb.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression Prob, C={self.C}\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return SoftPrediction(soft=pd.Series(clf.predict_proba(test.x)[:, 1]))\n\n\nclass LRCV(InAlgorithm):\n \"\"\"Kind of a cheap hack for now, but gives a proper cross-valudeted LR.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Init LRCV.\"\"\"\n super().__init__(name=\"LRCV\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n folder = KFold(n_splits=3, shuffle=False)\n clf = LogisticRegressionCV(\n cv=folder, n_jobs=-1, random_state=888, solver=\"liblinear\", multi_class=\"auto\"\n )\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)), info=dict(C=clf.C_[0]))\n", "path": "ethicml/algorithms/inprocess/logistic_regression.py"}], "after_files": [{"content": "\"\"\"Wrapper for SKLearn implementation of SVM.\"\"\"\nfrom typing import Optional, Union\n\nimport pandas as pd\nfrom sklearn.svm import SVC, LinearSVC\n\nfrom ethicml.common import implements\nfrom ethicml.utility import DataTuple, Prediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"SVM\"]\n\n\nclass SVM(InAlgorithm):\n \"\"\"Support Vector Machine.\"\"\"\n\n def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):\n \"\"\"Init SVM.\"\"\"\n kernel_name = f\" ({kernel})\" if kernel is not None else \"\"\n super().__init__(name=\"SVM\" + kernel_name, is_fairness_algo=False)\n self.C = SVC().C if C is None else C\n self.kernel = SVC().kernel if kernel is None else kernel\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: Union[DataTuple, TestTuple]) -> Prediction:\n clf = select_svm(self.C, self.kernel)\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\ndef select_svm(C: float, kernel: str) -> SVC:\n \"\"\"Select the appropriate SVM model for the given parameters.\"\"\"\n if kernel == \"linear\":\n return LinearSVC(C=C, dual=False, tol=1e-12, random_state=888)\n return SVC(C=C, kernel=kernel, gamma=\"auto\", random_state=888)\n", "path": "ethicml/algorithms/inprocess/svm.py"}, {"content": "\"\"\"Wrapper around Sci-Kit Learn Logistic Regression.\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\nfrom 
sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.model_selection import KFold\n\nfrom ethicml.common import implements\nfrom ethicml.utility import DataTuple, Prediction, SoftPrediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"LR\", \"LRCV\", \"LRProb\"]\n\n\nclass LR(InAlgorithm):\n \"\"\"Logistic regression with hard predictions.\"\"\"\n\n def __init__(self, C: Optional[float] = None):\n \"\"\"Init LR.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression (C={self.C})\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\nclass LRProb(InAlgorithm):\n \"\"\"Logistic regression with soft output.\"\"\"\n\n def __init__(self, C: Optional[int] = None):\n \"\"\"Init LRProb.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression Prob (C={self.C})\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return SoftPrediction(soft=pd.Series(clf.predict_proba(test.x)[:, 1]))\n\n\nclass LRCV(InAlgorithm):\n \"\"\"Kind of a cheap hack for now, but gives a proper cross-valudeted LR.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Init LRCV.\"\"\"\n super().__init__(name=\"LRCV\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n folder = KFold(n_splits=3, shuffle=False)\n clf = LogisticRegressionCV(\n cv=folder, n_jobs=-1, random_state=888, solver=\"liblinear\", multi_class=\"auto\"\n )\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)), info=dict(C=clf.C_[0]))\n", "path": "ethicml/algorithms/inprocess/logistic_regression.py"}]}
| 1,458 | 489 |
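The one-character fix in the row above is an f-string interpolation slip: without braces, `kernel` is literal text, so every `SVM` instance reports the same " (kernel)" suffix regardless of the kernel chosen. A quick standalone check (plain Python, no EthicML or scikit-learn needed):

```python
kernel = "rbf"

# Buggy: "kernel" is literal text inside the f-string, so the suffix never
# reflects which kernel was requested.
buggy_name = "SVM" + (f" (kernel)" if kernel is not None else "")

# Fixed: braces interpolate the variable's value.
fixed_name = "SVM" + (f" ({kernel})" if kernel is not None else "")

print(buggy_name)  # SVM (kernel)
print(fixed_name)  # SVM (rbf)
```

The companion logistic-regression change is purely cosmetic by comparison, swapping `, C=...` for ` (C=...)` in the reported name.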
gh_patches_debug_35533
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3739
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set environment variable when running scrapy check
Sometimes it is nice to be able to enable/disable functionality, e.g. calculating things in settings.py when just checking spider contracts instead of running a crawl. I therefore propose setting an environment variable like `SCRAPY_CHECK` when using the check command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/misc.py`
Content:
```
1 """Helper functions which don't fit anywhere else"""
2 import re
3 import hashlib
4 from importlib import import_module
5 from pkgutil import iter_modules
6
7 import six
8 from w3lib.html import replace_entities
9
10 from scrapy.utils.python import flatten, to_unicode
11 from scrapy.item import BaseItem
12
13
14 _ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes
15
16
17 def arg_to_iter(arg):
18 """Convert an argument to an iterable. The argument can be a None, single
19 value, or an iterable.
20
21 Exception: if arg is a dict, [arg] will be returned
22 """
23 if arg is None:
24 return []
25 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
26 return arg
27 else:
28 return [arg]
29
30
31 def load_object(path):
32 """Load an object given its absolute object path, and return it.
33
34 object can be a class, function, variable or an instance.
35 path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
36 """
37
38 try:
39 dot = path.rindex('.')
40 except ValueError:
41 raise ValueError("Error loading object '%s': not a full path" % path)
42
43 module, name = path[:dot], path[dot+1:]
44 mod = import_module(module)
45
46 try:
47 obj = getattr(mod, name)
48 except AttributeError:
49 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
50
51 return obj
52
53
54 def walk_modules(path):
55 """Loads a module and all its submodules from the given module path and
56 returns them. If *any* module throws an exception while importing, that
57 exception is thrown back.
58
59 For example: walk_modules('scrapy.utils')
60 """
61
62 mods = []
63 mod = import_module(path)
64 mods.append(mod)
65 if hasattr(mod, '__path__'):
66 for _, subpath, ispkg in iter_modules(mod.__path__):
67 fullpath = path + '.' + subpath
68 if ispkg:
69 mods += walk_modules(fullpath)
70 else:
71 submod = import_module(fullpath)
72 mods.append(submod)
73 return mods
74
75
76 def extract_regex(regex, text, encoding='utf-8'):
77 """Extract a list of unicode strings from the given text/encoding using the following policies:
78
79 * if the regex contains a named group called "extract" that will be returned
80 * if the regex contains multiple numbered groups, all those will be returned (flattened)
81 * if the regex doesn't contain any group the entire regex matching is returned
82 """
83
84 if isinstance(regex, six.string_types):
85 regex = re.compile(regex, re.UNICODE)
86
87 try:
88 strings = [regex.search(text).group('extract')] # named group
89 except Exception:
90 strings = regex.findall(text) # full regex or numbered groups
91 strings = flatten(strings)
92
93 if isinstance(text, six.text_type):
94 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
95 else:
96 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
97 for s in strings]
98
99
100 def md5sum(file):
101 """Calculate the md5 checksum of a file-like object without reading its
102 whole content in memory.
103
104 >>> from io import BytesIO
105 >>> md5sum(BytesIO(b'file content to hash'))
106 '784406af91dd5a54fbb9c84c2236595a'
107 """
108 m = hashlib.md5()
109 while True:
110 d = file.read(8096)
111 if not d:
112 break
113 m.update(d)
114 return m.hexdigest()
115
116
117 def rel_has_nofollow(rel):
118 """Return True if link rel attribute has nofollow type"""
119 return rel is not None and 'nofollow' in rel.split()
120
121
122 def create_instance(objcls, settings, crawler, *args, **kwargs):
123 """Construct a class instance using its ``from_crawler`` or
124 ``from_settings`` constructors, if available.
125
126 At least one of ``settings`` and ``crawler`` needs to be different from
127 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
128 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
129 tried.
130
131 ``*args`` and ``**kwargs`` are forwarded to the constructors.
132
133 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
134 """
135 if settings is None:
136 if crawler is None:
137 raise ValueError("Specifiy at least one of settings and crawler.")
138 settings = crawler.settings
139 if crawler and hasattr(objcls, 'from_crawler'):
140 return objcls.from_crawler(crawler, *args, **kwargs)
141 elif hasattr(objcls, 'from_settings'):
142 return objcls.from_settings(settings, *args, **kwargs)
143 else:
144 return objcls(*args, **kwargs)
145
```
Path: `scrapy/commands/check.py`
Content:
```
1 from __future__ import print_function
2 import time
3 import sys
4 from collections import defaultdict
5 from unittest import TextTestRunner, TextTestResult as _TextTestResult
6
7 from scrapy.commands import ScrapyCommand
8 from scrapy.contracts import ContractsManager
9 from scrapy.utils.misc import load_object
10 from scrapy.utils.conf import build_component_list
11
12
13 class TextTestResult(_TextTestResult):
14 def printSummary(self, start, stop):
15 write = self.stream.write
16 writeln = self.stream.writeln
17
18 run = self.testsRun
19 plural = "s" if run != 1 else ""
20
21 writeln(self.separator2)
22 writeln("Ran %d contract%s in %.3fs" % (run, plural, stop - start))
23 writeln()
24
25 infos = []
26 if not self.wasSuccessful():
27 write("FAILED")
28 failed, errored = map(len, (self.failures, self.errors))
29 if failed:
30 infos.append("failures=%d" % failed)
31 if errored:
32 infos.append("errors=%d" % errored)
33 else:
34 write("OK")
35
36 if infos:
37 writeln(" (%s)" % (", ".join(infos),))
38 else:
39 write("\n")
40
41
42 class Command(ScrapyCommand):
43 requires_project = True
44 default_settings = {'LOG_ENABLED': False}
45
46 def syntax(self):
47 return "[options] <spider>"
48
49 def short_desc(self):
50 return "Check spider contracts"
51
52 def add_options(self, parser):
53 ScrapyCommand.add_options(self, parser)
54 parser.add_option("-l", "--list", dest="list", action="store_true",
55 help="only list contracts, without checking them")
56 parser.add_option("-v", "--verbose", dest="verbose", default=False, action='store_true',
57 help="print contract tests for all spiders")
58
59 def run(self, args, opts):
60 # load contracts
61 contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))
62 conman = ContractsManager(load_object(c) for c in contracts)
63 runner = TextTestRunner(verbosity=2 if opts.verbose else 1)
64 result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)
65
66 # contract requests
67 contract_reqs = defaultdict(list)
68
69 spider_loader = self.crawler_process.spider_loader
70
71 for spidername in args or spider_loader.list():
72 spidercls = spider_loader.load(spidername)
73 spidercls.start_requests = lambda s: conman.from_spider(s, result)
74
75 tested_methods = conman.tested_methods_from_spidercls(spidercls)
76 if opts.list:
77 for method in tested_methods:
78 contract_reqs[spidercls.name].append(method)
79 elif tested_methods:
80 self.crawler_process.crawl(spidercls)
81
82 # start checks
83 if opts.list:
84 for spider, methods in sorted(contract_reqs.items()):
85 if not methods and not opts.verbose:
86 continue
87 print(spider)
88 for method in sorted(methods):
89 print(' * %s' % method)
90 else:
91 start = time.time()
92 self.crawler_process.start()
93 stop = time.time()
94
95 result.printErrors()
96 result.printSummary(start, stop)
97 self.exitcode = int(not result.wasSuccessful())
98
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py
--- a/scrapy/commands/check.py
+++ b/scrapy/commands/check.py
@@ -6,7 +6,7 @@
from scrapy.commands import ScrapyCommand
from scrapy.contracts import ContractsManager
-from scrapy.utils.misc import load_object
+from scrapy.utils.misc import load_object, set_environ
from scrapy.utils.conf import build_component_list
@@ -68,16 +68,17 @@
spider_loader = self.crawler_process.spider_loader
- for spidername in args or spider_loader.list():
- spidercls = spider_loader.load(spidername)
- spidercls.start_requests = lambda s: conman.from_spider(s, result)
-
- tested_methods = conman.tested_methods_from_spidercls(spidercls)
- if opts.list:
- for method in tested_methods:
- contract_reqs[spidercls.name].append(method)
- elif tested_methods:
- self.crawler_process.crawl(spidercls)
+ with set_environ(SCRAPY_CHECK='true'):
+ for spidername in args or spider_loader.list():
+ spidercls = spider_loader.load(spidername)
+ spidercls.start_requests = lambda s: conman.from_spider(s, result)
+
+ tested_methods = conman.tested_methods_from_spidercls(spidercls)
+ if opts.list:
+ for method in tested_methods:
+ contract_reqs[spidercls.name].append(method)
+ elif tested_methods:
+ self.crawler_process.crawl(spidercls)
# start checks
if opts.list:
diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -1,6 +1,8 @@
"""Helper functions which don't fit anywhere else"""
+import os
import re
import hashlib
+from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
@@ -142,3 +144,21 @@
return objcls.from_settings(settings, *args, **kwargs)
else:
return objcls(*args, **kwargs)
+
+
+@contextmanager
+def set_environ(**kwargs):
+ """Temporarily set environment variables inside the context manager and
+ fully restore previous environment afterwards
+ """
+
+ original_env = {k: os.environ.get(k) for k in kwargs}
+ os.environ.update(kwargs)
+ try:
+ yield
+ finally:
+ for k, v in original_env.items():
+ if v is None:
+ del os.environ[k]
+ else:
+ os.environ[k] = v
|
{"golden_diff": "diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py\n--- a/scrapy/commands/check.py\n+++ b/scrapy/commands/check.py\n@@ -6,7 +6,7 @@\n \n from scrapy.commands import ScrapyCommand\n from scrapy.contracts import ContractsManager\n-from scrapy.utils.misc import load_object\n+from scrapy.utils.misc import load_object, set_environ\n from scrapy.utils.conf import build_component_list\n \n \n@@ -68,16 +68,17 @@\n \n spider_loader = self.crawler_process.spider_loader\n \n- for spidername in args or spider_loader.list():\n- spidercls = spider_loader.load(spidername)\n- spidercls.start_requests = lambda s: conman.from_spider(s, result)\n-\n- tested_methods = conman.tested_methods_from_spidercls(spidercls)\n- if opts.list:\n- for method in tested_methods:\n- contract_reqs[spidercls.name].append(method)\n- elif tested_methods:\n- self.crawler_process.crawl(spidercls)\n+ with set_environ(SCRAPY_CHECK='true'):\n+ for spidername in args or spider_loader.list():\n+ spidercls = spider_loader.load(spidername)\n+ spidercls.start_requests = lambda s: conman.from_spider(s, result)\n+\n+ tested_methods = conman.tested_methods_from_spidercls(spidercls)\n+ if opts.list:\n+ for method in tested_methods:\n+ contract_reqs[spidercls.name].append(method)\n+ elif tested_methods:\n+ self.crawler_process.crawl(spidercls)\n \n # start checks\n if opts.list:\ndiff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -1,6 +1,8 @@\n \"\"\"Helper functions which don't fit anywhere else\"\"\"\n+import os\n import re\n import hashlib\n+from contextlib import contextmanager\n from importlib import import_module\n from pkgutil import iter_modules\n \n@@ -142,3 +144,21 @@\n return objcls.from_settings(settings, *args, **kwargs)\n else:\n return objcls(*args, **kwargs)\n+\n+\n+@contextmanager\n+def set_environ(**kwargs):\n+ \"\"\"Temporarily set environment variables inside the context manager and\n+ fully restore previous environment afterwards\n+ \"\"\"\n+\n+ original_env = {k: os.environ.get(k) for k in kwargs}\n+ os.environ.update(kwargs)\n+ try:\n+ yield\n+ finally:\n+ for k, v in original_env.items():\n+ if v is None:\n+ del os.environ[k]\n+ else:\n+ os.environ[k] = v\n", "issue": "Set environment variable when running scrapy check\nSometimes it is nice to be able to enable/disable functionality, e.g. calculating things in settings.py when just checking spider contracts instead of running a crawl. I therefor propose setting an environment variable like `SCRAPY_CHECK` when using the check command.\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport re\nimport hashlib\nfrom importlib import import_module\nfrom pkgutil import iter_modules\n\nimport six\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. 
The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be a class, function, variable or an instance.\n path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, six.string_types):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, six.text_type):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specifiy at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n return objcls.from_crawler(crawler, *args, **kwargs)\n elif hasattr(objcls, 'from_settings'):\n return objcls.from_settings(settings, *args, **kwargs)\n else:\n return objcls(*args, **kwargs)\n", "path": "scrapy/utils/misc.py"}, {"content": "from __future__ import print_function\nimport time\nimport sys\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n 
self.exitcode = int(not result.wasSuccessful())\n\n", "path": "scrapy/commands/check.py"}], "after_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport os\nimport re\nimport hashlib\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\n\nimport six\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be a class, function, variable or an instance.\n path ie: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, six.string_types):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, six.text_type):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specifiy at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n return objcls.from_crawler(crawler, *args, **kwargs)\n elif hasattr(objcls, 'from_settings'):\n return objcls.from_settings(settings, *args, **kwargs)\n else:\n return objcls(*args, **kwargs)\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n", "path": "scrapy/utils/misc.py"}, {"content": "from __future__ import print_function\nimport time\nimport sys\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object, set_environ\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n with set_environ(SCRAPY_CHECK='true'):\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n 
contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n self.exitcode = int(not result.wasSuccessful())\n\n", "path": "scrapy/commands/check.py"}]}
| 2,668 | 604 |
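The patch in the row above exposes contract checking to project code through a `SCRAPY_CHECK` environment variable, set via a small `set_environ` context manager. The sketch below mirrors that pattern and shows how a project `settings.py` might branch on the flag; `load_expensive_setting` is a hypothetical stand-in rather than part of Scrapy's API, and only the `SCRAPY_CHECK='true'` convention comes from the diff itself.

```python
import os
from contextlib import contextmanager


@contextmanager
def set_environ(**kwargs):
    """Temporarily set environment variables, restoring the prior state on exit."""
    original = {key: os.environ.get(key) for key in kwargs}
    os.environ.update(kwargs)
    try:
        yield
    finally:
        for key, value in original.items():
            if value is None:
                del os.environ[key]
            else:
                os.environ[key] = value


def load_expensive_setting():
    # What a project settings.py could do: skip slow work while contracts are
    # being checked ('scrapy check' wraps spider loading in SCRAPY_CHECK=true).
    if os.environ.get("SCRAPY_CHECK") == "true":
        return None
    return "expensive value"


with set_environ(SCRAPY_CHECK="true"):
    print(load_expensive_setting())  # None while the flag is set
print(load_expensive_setting())      # "expensive value" again, unless set in your shell
```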
gh_patches_debug_28181
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-622
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List of people who taught at events of specific type
Usecase: Tracy wants to grab a list of people who taught at DC workshops, so that she knows who is an experienced DC instructor.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/filters.py`
Content:
```
1 from distutils.util import strtobool
2
3 import django.forms.widgets
4
5 import django_filters
6 from django_countries import Countries
7
8 from workshops.models import Event, Host, Person, Task, Airport, EventRequest
9
10 EMPTY_SELECTION = (None, '---------')
11
12
13 class AllCountriesFilter(django_filters.ChoiceFilter):
14 @property
15 def field(self):
16 qs = self.model._default_manager.distinct()
17 qs = qs.order_by(self.name).values_list(self.name, flat=True)
18
19 choices = [o for o in qs if o]
20 countries = Countries()
21 countries.only = choices
22
23 self.extra['choices'] = list(countries)
24 self.extra['choices'].insert(0, EMPTY_SELECTION)
25 return super().field
26
27
28 class ForeignKeyAllValuesFilter(django_filters.ChoiceFilter):
29 def __init__(self, model, *args, **kwargs):
30 self.lookup_model = model
31 super().__init__(*args, **kwargs)
32
33 @property
34 def field(self):
35 name = self.name
36 model = self.lookup_model
37
38 qs1 = self.model._default_manager.distinct()
39 qs1 = qs1.order_by(name).values_list(name, flat=True)
40 qs2 = model.objects.filter(pk__in=qs1)
41 self.extra['choices'] = [(o.pk, str(o)) for o in qs2]
42 self.extra['choices'].insert(0, EMPTY_SELECTION)
43 return super().field
44
45
46 class EventStateFilter(django_filters.ChoiceFilter):
47 def filter(self, qs, value):
48 if isinstance(value, django_filters.fields.Lookup):
49 value = value.value
50
51 # no filtering
52 if value in ([], (), {}, None, '', 'all'):
53 return qs
54
55 # no need to check if value exists in self.extra['choices'] because
56 # validation is done by django_filters
57 try:
58 return getattr(qs, "{}_events".format(value))()
59 except AttributeError:
60 return qs
61
62
63 class EventFilter(django_filters.FilterSet):
64 assigned_to = ForeignKeyAllValuesFilter(Person)
65 host = ForeignKeyAllValuesFilter(Host)
66 administrator = ForeignKeyAllValuesFilter(Host)
67
68 STATUS_CHOICES = [
69 ('', 'All'),
70 ('past', 'Past'),
71 ('ongoing', 'Ongoing'),
72 ('upcoming', 'Upcoming'),
73 ('unpublished', 'Unpublished'),
74 ('uninvoiced', 'Uninvoiced'),
75 ]
76 status = EventStateFilter(choices=STATUS_CHOICES)
77
78 invoice_status = django_filters.ChoiceFilter(
79 choices=(EMPTY_SELECTION, ) + Event.INVOICED_CHOICES,
80 )
81
82 class Meta:
83 model = Event
84 fields = [
85 'assigned_to',
86 'tags',
87 'host',
88 'administrator',
89 'invoice_status',
90 'completed',
91 ]
92 order_by = ['-slug', 'slug', 'start', '-start', 'end', '-end']
93
94
95 class EventRequestFilter(django_filters.FilterSet):
96 assigned_to = ForeignKeyAllValuesFilter(Person)
97 country = AllCountriesFilter()
98 active = django_filters.TypedChoiceFilter(
99 choices=(('true', 'Open'), ('false', 'Closed')),
100 coerce=strtobool,
101 label='Status',
102 widget=django.forms.widgets.RadioSelect,
103 )
104
105 class Meta:
106 model = EventRequest
107 fields = [
108 'assigned_to',
109 'workshop_type',
110 'active',
111 'country',
112 ]
113 order_by = ['-created_at', 'created_at']
114
115
116 class HostFilter(django_filters.FilterSet):
117 country = AllCountriesFilter()
118
119 class Meta:
120 model = Host
121 fields = [
122 'country',
123 ]
124 order_by = ['fullname', '-fullname', 'domain', '-domain', ]
125
126
127 class PersonFilter(django_filters.FilterSet):
128 class Meta:
129 model = Person
130 fields = [
131 'badges',
132 ]
133 order_by = ["lastname", "-lastname", "firstname", "-firstname",
134 "email", "-email"]
135
136 def get_order_by(self, order_value):
137 if order_value == 'firstname':
138 return ['personal', 'middle', 'family']
139 elif order_value == '-firstname':
140 return ['-personal', '-middle', '-family']
141 elif order_value == 'lastname':
142 return ['family', 'middle', 'personal']
143 elif order_value == '-lastname':
144 return ['-family', '-middle', '-personal']
145 return super().get_order_by(order_value)
146
147
148 class TaskFilter(django_filters.FilterSet):
149 class Meta:
150 model = Task
151 fields = [
152 'event',
153 # can't filter on person because person's name contains 3 fields:
154 # person.personal, person.middle, person.family
155 # 'person',
156 'role',
157 ]
158 order_by = [
159 ['event__slug', 'Event'],
160 ['-event__slug', 'Event (descending)'],
161 ['person__family', 'Person'],
162 ['-person__family', 'Person (descending)'],
163 ['role', 'Role'],
164 ['-role', 'Role (descending)'],
165 ]
166
167
168 class AirportFilter(django_filters.FilterSet):
169 fullname = django_filters.CharFilter(lookup_type='icontains')
170
171 class Meta:
172 model = Airport
173 fields = [
174 'fullname',
175 ]
176 order_by = ["iata", "-iata", "fullname", "-fullname"]
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/workshops/filters.py b/workshops/filters.py
--- a/workshops/filters.py
+++ b/workshops/filters.py
@@ -5,7 +5,17 @@
import django_filters
from django_countries import Countries
-from workshops.models import Event, Host, Person, Task, Airport, EventRequest
+from workshops.models import (
+ Event,
+ Host,
+ Person,
+ Task,
+ Airport,
+ EventRequest,
+ Tag,
+ Role,
+ Task,
+)
EMPTY_SELECTION = (None, '---------')
@@ -124,11 +134,31 @@
order_by = ['fullname', '-fullname', 'domain', '-domain', ]
+def filter_taught_workshops(queryset, values):
+ """Limit Persons to only instructors from events with specific tags.
+
+ This needs to be in a separate function because django-filters doesn't
+ support `action` parameter as supposed, ie. with
+ `action='filter_taught_workshops'` it doesn't call the method; instead it
+ tries calling a string, which results in error."""
+ if not values:
+ return queryset
+
+ return queryset.filter(task__role__name='instructor') \
+ .filter(task__event__tags__in=values) \
+ .distinct()
+
+
class PersonFilter(django_filters.FilterSet):
+ taught_workshops = django_filters.ModelMultipleChoiceFilter(
+ queryset=Tag.objects.all(), label='Taught at workshops of type',
+ action=filter_taught_workshops,
+ )
+
class Meta:
model = Person
fields = [
- 'badges',
+ 'badges', 'taught_workshops',
]
order_by = ["lastname", "-lastname", "firstname", "-firstname",
"email", "-email"]
|
{"golden_diff": "diff --git a/workshops/filters.py b/workshops/filters.py\n--- a/workshops/filters.py\n+++ b/workshops/filters.py\n@@ -5,7 +5,17 @@\n import django_filters\n from django_countries import Countries\n \n-from workshops.models import Event, Host, Person, Task, Airport, EventRequest\n+from workshops.models import (\n+ Event,\n+ Host,\n+ Person,\n+ Task,\n+ Airport,\n+ EventRequest,\n+ Tag,\n+ Role,\n+ Task,\n+)\n \n EMPTY_SELECTION = (None, '---------')\n \n@@ -124,11 +134,31 @@\n order_by = ['fullname', '-fullname', 'domain', '-domain', ]\n \n \n+def filter_taught_workshops(queryset, values):\n+ \"\"\"Limit Persons to only instructors from events with specific tags.\n+\n+ This needs to be in a separate function because django-filters doesn't\n+ support `action` parameter as supposed, ie. with\n+ `action='filter_taught_workshops'` it doesn't call the method; instead it\n+ tries calling a string, which results in error.\"\"\"\n+ if not values:\n+ return queryset\n+\n+ return queryset.filter(task__role__name='instructor') \\\n+ .filter(task__event__tags__in=values) \\\n+ .distinct()\n+\n+\n class PersonFilter(django_filters.FilterSet):\n+ taught_workshops = django_filters.ModelMultipleChoiceFilter(\n+ queryset=Tag.objects.all(), label='Taught at workshops of type',\n+ action=filter_taught_workshops,\n+ )\n+\n class Meta:\n model = Person\n fields = [\n- 'badges',\n+ 'badges', 'taught_workshops',\n ]\n order_by = [\"lastname\", \"-lastname\", \"firstname\", \"-firstname\",\n \"email\", \"-email\"]\n", "issue": "List of people who taught at events of specific type\nUsecase: Tracy wants to grab list of people who taught at DC workshops, so that she knows who is experienced DC instructor.\n\n", "before_files": [{"content": "from distutils.util import strtobool\n\nimport django.forms.widgets\n\nimport django_filters\nfrom django_countries import Countries\n\nfrom workshops.models import Event, Host, Person, Task, Airport, EventRequest\n\nEMPTY_SELECTION = (None, '---------')\n\n\nclass AllCountriesFilter(django_filters.ChoiceFilter):\n @property\n def field(self):\n qs = self.model._default_manager.distinct()\n qs = qs.order_by(self.name).values_list(self.name, flat=True)\n\n choices = [o for o in qs if o]\n countries = Countries()\n countries.only = choices\n\n self.extra['choices'] = list(countries)\n self.extra['choices'].insert(0, EMPTY_SELECTION)\n return super().field\n\n\nclass ForeignKeyAllValuesFilter(django_filters.ChoiceFilter):\n def __init__(self, model, *args, **kwargs):\n self.lookup_model = model\n super().__init__(*args, **kwargs)\n\n @property\n def field(self):\n name = self.name\n model = self.lookup_model\n\n qs1 = self.model._default_manager.distinct()\n qs1 = qs1.order_by(name).values_list(name, flat=True)\n qs2 = model.objects.filter(pk__in=qs1)\n self.extra['choices'] = [(o.pk, str(o)) for o in qs2]\n self.extra['choices'].insert(0, EMPTY_SELECTION)\n return super().field\n\n\nclass EventStateFilter(django_filters.ChoiceFilter):\n def filter(self, qs, value):\n if isinstance(value, django_filters.fields.Lookup):\n value = value.value\n\n # no filtering\n if value in ([], (), {}, None, '', 'all'):\n return qs\n\n # no need to check if value exists in self.extra['choices'] because\n # validation is done by django_filters\n try:\n return getattr(qs, \"{}_events\".format(value))()\n except AttributeError:\n return qs\n\n\nclass EventFilter(django_filters.FilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person)\n host = ForeignKeyAllValuesFilter(Host)\n 
administrator = ForeignKeyAllValuesFilter(Host)\n\n STATUS_CHOICES = [\n ('', 'All'),\n ('past', 'Past'),\n ('ongoing', 'Ongoing'),\n ('upcoming', 'Upcoming'),\n ('unpublished', 'Unpublished'),\n ('uninvoiced', 'Uninvoiced'),\n ]\n status = EventStateFilter(choices=STATUS_CHOICES)\n\n invoice_status = django_filters.ChoiceFilter(\n choices=(EMPTY_SELECTION, ) + Event.INVOICED_CHOICES,\n )\n\n class Meta:\n model = Event\n fields = [\n 'assigned_to',\n 'tags',\n 'host',\n 'administrator',\n 'invoice_status',\n 'completed',\n ]\n order_by = ['-slug', 'slug', 'start', '-start', 'end', '-end']\n\n\nclass EventRequestFilter(django_filters.FilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person)\n country = AllCountriesFilter()\n active = django_filters.TypedChoiceFilter(\n choices=(('true', 'Open'), ('false', 'Closed')),\n coerce=strtobool,\n label='Status',\n widget=django.forms.widgets.RadioSelect,\n )\n\n class Meta:\n model = EventRequest\n fields = [\n 'assigned_to',\n 'workshop_type',\n 'active',\n 'country',\n ]\n order_by = ['-created_at', 'created_at']\n\n\nclass HostFilter(django_filters.FilterSet):\n country = AllCountriesFilter()\n\n class Meta:\n model = Host\n fields = [\n 'country',\n ]\n order_by = ['fullname', '-fullname', 'domain', '-domain', ]\n\n\nclass PersonFilter(django_filters.FilterSet):\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n order_by = [\"lastname\", \"-lastname\", \"firstname\", \"-firstname\",\n \"email\", \"-email\"]\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass TaskFilter(django_filters.FilterSet):\n class Meta:\n model = Task\n fields = [\n 'event',\n # can't filter on person because person's name contains 3 fields:\n # person.personal, person.middle, person.family\n # 'person',\n 'role',\n ]\n order_by = [\n ['event__slug', 'Event'],\n ['-event__slug', 'Event (descending)'],\n ['person__family', 'Person'],\n ['-person__family', 'Person (descending)'],\n ['role', 'Role'],\n ['-role', 'Role (descending)'],\n ]\n\n\nclass AirportFilter(django_filters.FilterSet):\n fullname = django_filters.CharFilter(lookup_type='icontains')\n\n class Meta:\n model = Airport\n fields = [\n 'fullname',\n ]\n order_by = [\"iata\", \"-iata\", \"fullname\", \"-fullname\"]\n", "path": "workshops/filters.py"}], "after_files": [{"content": "from distutils.util import strtobool\n\nimport django.forms.widgets\n\nimport django_filters\nfrom django_countries import Countries\n\nfrom workshops.models import (\n Event,\n Host,\n Person,\n Task,\n Airport,\n EventRequest,\n Tag,\n Role,\n Task,\n)\n\nEMPTY_SELECTION = (None, '---------')\n\n\nclass AllCountriesFilter(django_filters.ChoiceFilter):\n @property\n def field(self):\n qs = self.model._default_manager.distinct()\n qs = qs.order_by(self.name).values_list(self.name, flat=True)\n\n choices = [o for o in qs if o]\n countries = Countries()\n countries.only = choices\n\n self.extra['choices'] = list(countries)\n self.extra['choices'].insert(0, EMPTY_SELECTION)\n return super().field\n\n\nclass ForeignKeyAllValuesFilter(django_filters.ChoiceFilter):\n def __init__(self, model, *args, **kwargs):\n self.lookup_model = model\n super().__init__(*args, **kwargs)\n\n 
@property\n def field(self):\n name = self.name\n model = self.lookup_model\n\n qs1 = self.model._default_manager.distinct()\n qs1 = qs1.order_by(name).values_list(name, flat=True)\n qs2 = model.objects.filter(pk__in=qs1)\n self.extra['choices'] = [(o.pk, str(o)) for o in qs2]\n self.extra['choices'].insert(0, EMPTY_SELECTION)\n return super().field\n\n\nclass EventStateFilter(django_filters.ChoiceFilter):\n def filter(self, qs, value):\n if isinstance(value, django_filters.fields.Lookup):\n value = value.value\n\n # no filtering\n if value in ([], (), {}, None, '', 'all'):\n return qs\n\n # no need to check if value exists in self.extra['choices'] because\n # validation is done by django_filters\n try:\n return getattr(qs, \"{}_events\".format(value))()\n except AttributeError:\n return qs\n\n\nclass EventFilter(django_filters.FilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person)\n host = ForeignKeyAllValuesFilter(Host)\n administrator = ForeignKeyAllValuesFilter(Host)\n\n STATUS_CHOICES = [\n ('', 'All'),\n ('past', 'Past'),\n ('ongoing', 'Ongoing'),\n ('upcoming', 'Upcoming'),\n ('unpublished', 'Unpublished'),\n ('uninvoiced', 'Uninvoiced'),\n ]\n status = EventStateFilter(choices=STATUS_CHOICES)\n\n invoice_status = django_filters.ChoiceFilter(\n choices=(EMPTY_SELECTION, ) + Event.INVOICED_CHOICES,\n )\n\n class Meta:\n model = Event\n fields = [\n 'assigned_to',\n 'tags',\n 'host',\n 'administrator',\n 'invoice_status',\n 'completed',\n ]\n order_by = ['-slug', 'slug', 'start', '-start', 'end', '-end']\n\n\nclass EventRequestFilter(django_filters.FilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person)\n country = AllCountriesFilter()\n active = django_filters.TypedChoiceFilter(\n choices=(('true', 'Open'), ('false', 'Closed')),\n coerce=strtobool,\n label='Status',\n widget=django.forms.widgets.RadioSelect,\n )\n\n class Meta:\n model = EventRequest\n fields = [\n 'assigned_to',\n 'workshop_type',\n 'active',\n 'country',\n ]\n order_by = ['-created_at', 'created_at']\n\n\nclass HostFilter(django_filters.FilterSet):\n country = AllCountriesFilter()\n\n class Meta:\n model = Host\n fields = [\n 'country',\n ]\n order_by = ['fullname', '-fullname', 'domain', '-domain', ]\n\n\ndef filter_taught_workshops(queryset, values):\n \"\"\"Limit Persons to only instructors from events with specific tags.\n\n This needs to be in a separate function because django-filters doesn't\n support `action` parameter as supposed, ie. 
with\n `action='filter_taught_workshops'` it doesn't call the method; instead it\n tries calling a string, which results in error.\"\"\"\n if not values:\n return queryset\n\n return queryset.filter(task__role__name='instructor') \\\n .filter(task__event__tags__in=values) \\\n .distinct()\n\n\nclass PersonFilter(django_filters.FilterSet):\n taught_workshops = django_filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(), label='Taught at workshops of type',\n action=filter_taught_workshops,\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges', 'taught_workshops',\n ]\n order_by = [\"lastname\", \"-lastname\", \"firstname\", \"-firstname\",\n \"email\", \"-email\"]\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass TaskFilter(django_filters.FilterSet):\n class Meta:\n model = Task\n fields = [\n 'event',\n # can't filter on person because person's name contains 3 fields:\n # person.personal, person.middle, person.family\n # 'person',\n 'role',\n ]\n order_by = [\n ['event__slug', 'Event'],\n ['-event__slug', 'Event (descending)'],\n ['person__family', 'Person'],\n ['-person__family', 'Person (descending)'],\n ['role', 'Role'],\n ['-role', 'Role (descending)'],\n ]\n\n\nclass AirportFilter(django_filters.FilterSet):\n fullname = django_filters.CharFilter(lookup_type='icontains')\n\n class Meta:\n model = Airport\n fields = [\n 'fullname',\n ]\n order_by = [\"iata\", \"-iata\", \"fullname\", \"-fullname\"]\n", "path": "workshops/filters.py"}]}
| 1,873 | 410 |
gh_patches_debug_8168 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-7387 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail to import Nell dataset
### 🐛 Describe the bug
I tried to import Nell data set using NELL class:
from torch_geometric.datasets import NELL
dataset = NELL(root='data/Nell')
data = dataset[0]
But I got the following error message:
Traceback (most recent call last):
File "c:\Users\13466\Desktop\USTLab\LabDoc\HPCA23\Nell.py", line 10, in <module>
dataset = NELL(root='data/Nell')
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\datasets\nell.py", line 62, in __init__
super().__init__(root, transform, pre_transform)
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\data\in_memory_dataset.py", line 57, in __init__
super().__init__(root, transform, pre_transform, pre_filter, log)
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\data\dataset.py", line 97, in __init__
self._process()
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\data\dataset.py", line 230, in _process
self.process()
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\datasets\nell.py", line 82, in process
data = read_planetoid_data(self.raw_dir, 'nell.0.001')
File "C:\Users\13466\anaconda3\lib\site-packages\torch_geometric\io\planetoid.py", line 53, in read_planetoid_data
row, col, value = SparseTensor.from_dense(x).coo()
AttributeError: type object 'SparseTensor' has no attribute 'from_dense'
### Environment
* PyG version:2.3.1
* PyTorch version:2.0.1
* OS:Windows 11
* Python version:3.10
* CUDA/cuDNN version:
* How you installed PyTorch and PyG (`conda`, `pip`, source):pip
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/typing.py`
Content:
```
1 import warnings
2 from typing import Dict, List, Optional, Tuple, Union
3
4 import numpy as np
5 import torch
6 from torch import Tensor
7
8 WITH_PT2 = int(torch.__version__.split('.')[0]) >= 2
9
10 try:
11 import pyg_lib # noqa
12 WITH_PYG_LIB = True
13 WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul')
14 WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add')
15 WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort')
16 except (ImportError, OSError) as e:
17 if isinstance(e, OSError):
18 warnings.warn(f"An issue occurred while importing 'pyg-lib'. "
19 f"Disabling its usage. Stacktrace: {e}")
20 pyg_lib = object
21 WITH_PYG_LIB = False
22 WITH_GMM = False
23 WITH_SAMPLED_OP = False
24 WITH_INDEX_SORT = False
25
26 try:
27 import torch_scatter # noqa
28 WITH_TORCH_SCATTER = True
29 except (ImportError, OSError) as e:
30 if isinstance(e, OSError):
31 warnings.warn(f"An issue occurred while importing 'torch-scatter'. "
32 f"Disabling its usage. Stacktrace: {e}")
33 torch_scatter = object
34 WITH_TORCH_SCATTER = False
35
36 try:
37 import torch_cluster # noqa
38 WITH_TORCH_CLUSTER = True
39 WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__
40 except (ImportError, OSError) as e:
41 if isinstance(e, OSError):
42 warnings.warn(f"An issue occurred while importing 'torch-cluster'. "
43 f"Disabling its usage. Stacktrace: {e}")
44 WITH_TORCH_CLUSTER = False
45
46 try:
47 import torch_spline_conv # noqa
48 WITH_TORCH_SPLINE_CONV = True
49 except (ImportError, OSError) as e:
50 if isinstance(e, OSError):
51 warnings.warn(
52 f"An issue occurred while importing 'torch-spline-conv'. "
53 f"Disabling its usage. Stacktrace: {e}")
54 WITH_TORCH_SPLINE_CONV = False
55
56 try:
57 import torch_sparse # noqa
58 from torch_sparse import SparseStorage, SparseTensor
59 WITH_TORCH_SPARSE = True
60 except (ImportError, OSError) as e:
61 if isinstance(e, OSError):
62 warnings.warn(f"An issue occurred while importing 'torch-sparse'. "
63 f"Disabling its usage. Stacktrace: {e}")
64 WITH_TORCH_SPARSE = False
65
66 class SparseStorage:
67 def __init__(
68 self,
69 row: Optional[Tensor] = None,
70 rowptr: Optional[Tensor] = None,
71 col: Optional[Tensor] = None,
72 value: Optional[Tensor] = None,
73 sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,
74 rowcount: Optional[Tensor] = None,
75 colptr: Optional[Tensor] = None,
76 colcount: Optional[Tensor] = None,
77 csr2csc: Optional[Tensor] = None,
78 csc2csr: Optional[Tensor] = None,
79 is_sorted: bool = False,
80 trust_data: bool = False,
81 ):
82 raise ImportError("'SparseStorage' requires 'torch-sparse'")
83
84 class SparseTensor:
85 def __init__(
86 self,
87 row: Optional[Tensor] = None,
88 rowptr: Optional[Tensor] = None,
89 col: Optional[Tensor] = None,
90 value: Optional[Tensor] = None,
91 sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,
92 is_sorted: bool = False,
93 trust_data: bool = False,
94 ):
95 raise ImportError("'SparseTensor' requires 'torch-sparse'")
96
97 @classmethod
98 def from_edge_index(
99 self,
100 edge_index: Tensor,
101 edge_attr: Optional[Tensor] = None,
102 sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,
103 is_sorted: bool = False,
104 trust_data: bool = False,
105 ) -> 'SparseTensor':
106 raise ImportError("'SparseTensor' requires 'torch-sparse'")
107
108 def size(self, dim: int) -> int:
109 raise ImportError("'SparseTensor' requires 'torch-sparse'")
110
111 def is_cuda(self) -> bool:
112 raise ImportError("'SparseTensor' requires 'torch-sparse'")
113
114 def has_value(self) -> bool:
115 raise ImportError("'SparseTensor' requires 'torch-sparse'")
116
117 def set_value(self, value: Optional[Tensor],
118 layout: Optional[str] = None) -> 'SparseTensor':
119 raise ImportError("'SparseTensor' requires 'torch-sparse'")
120
121 def fill_value(self, fill_value: float,
122 dtype: Optional[torch.dtype] = None) -> 'SparseTensor':
123 raise ImportError("'SparseTensor' requires 'torch-sparse'")
124
125 def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
126 raise ImportError("'SparseTensor' requires 'torch-sparse'")
127
128 def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
129 raise ImportError("'SparseTensor' requires 'torch-sparse'")
130
131 def to_torch_sparse_csr_tensor(
132 self,
133 dtype: Optional[torch.dtype] = None,
134 ) -> Tensor:
135 raise ImportError("'SparseTensor' requires 'torch-sparse'")
136
137 class torch_sparse:
138 @staticmethod
139 def matmul(src: SparseTensor, other: Tensor,
140 reduce: str = "sum") -> Tensor:
141 raise ImportError("'matmul' requires 'torch-sparse'")
142
143 @staticmethod
144 def sum(src: SparseTensor, dim: Optional[int] = None) -> Tensor:
145 raise ImportError("'sum' requires 'torch-sparse'")
146
147 @staticmethod
148 def mul(src: SparseTensor, other: Tensor) -> SparseTensor:
149 raise ImportError("'mul' requires 'torch-sparse'")
150
151 @staticmethod
152 def set_diag(src: SparseTensor, values: Optional[Tensor] = None,
153 k: int = 0) -> SparseTensor:
154 raise ImportError("'set_diag' requires 'torch-sparse'")
155
156 @staticmethod
157 def fill_diag(src: SparseTensor, fill_value: float,
158 k: int = 0) -> SparseTensor:
159 raise ImportError("'fill_diag' requires 'torch-sparse'")
160
161 @staticmethod
162 def masked_select_nnz(src: SparseTensor, mask: Tensor,
163 layout: Optional[str] = None) -> SparseTensor:
164 raise ImportError("'masked_select_nnz' requires 'torch-sparse'")
165
166
167 # Types for accessing data ####################################################
168
169 # Node-types are denoted by a single string, e.g.: `data['paper']`:
170 NodeType = str
171
172 # Edge-types are denotes by a triplet of strings, e.g.:
173 # `data[('author', 'writes', 'paper')]
174 EdgeType = Tuple[str, str, str]
175
176 DEFAULT_REL = 'to'
177 EDGE_TYPE_STR_SPLIT = '__'
178
179
180 class EdgeTypeStr(str):
181 r"""A helper class to construct serializable edge types by merging an edge
182 type tuple into a single string."""
183 def __new__(cls, *args):
184 if isinstance(args[0], (list, tuple)):
185 # Unwrap `EdgeType((src, rel, dst))` and `EdgeTypeStr((src, dst))`:
186 args = tuple(args[0])
187
188 if len(args) == 1 and isinstance(args[0], str):
189 args = args[0] # An edge type string was passed.
190
191 elif len(args) == 2 and all(isinstance(arg, str) for arg in args):
192 # A `(src, dst)` edge type was passed - add `DEFAULT_REL`:
193 args = (args[0], DEFAULT_REL, args[1])
194 args = EDGE_TYPE_STR_SPLIT.join(args)
195
196 elif len(args) == 3 and all(isinstance(arg, str) for arg in args):
197 # A `(src, rel, dst)` edge type was passed:
198 args = EDGE_TYPE_STR_SPLIT.join(args)
199
200 else:
201 raise ValueError(f"Encountered invalid edge type '{args}'")
202
203 return str.__new__(cls, args)
204
205 def to_tuple(self) -> EdgeType:
206 r"""Returns the original edge type."""
207 out = tuple(self.split(EDGE_TYPE_STR_SPLIT))
208 if len(out) != 3:
209 raise ValueError(f"Cannot convert the edge type '{self}' to a "
210 f"tuple since it holds invalid characters")
211 return out
212
213
214 # There exist some short-cuts to query edge-types (given that the full triplet
215 # can be uniquely reconstructed, e.g.:
216 # * via str: `data['writes']`
217 # * via Tuple[str, str]: `data[('author', 'paper')]`
218 QueryType = Union[NodeType, EdgeType, str, Tuple[str, str]]
219
220 Metadata = Tuple[List[NodeType], List[EdgeType]]
221
222 # A representation of a feature tensor
223 FeatureTensorType = Union[Tensor, np.ndarray]
224
225 # A representation of an edge index, following the possible formats:
226 # * COO: (row, col)
227 # * CSC: (row, colptr)
228 # * CSR: (rowptr, col)
229 EdgeTensorType = Tuple[Tensor, Tensor]
230
231 # Types for message passing ###################################################
232
233 Adj = Union[Tensor, SparseTensor]
234 OptTensor = Optional[Tensor]
235 PairTensor = Tuple[Tensor, Tensor]
236 OptPairTensor = Tuple[Tensor, Optional[Tensor]]
237 PairOptTensor = Tuple[Optional[Tensor], Optional[Tensor]]
238 Size = Optional[Tuple[int, int]]
239 NoneType = Optional[Tensor]
240
241 MaybeHeteroNodeTensor = Union[Tensor, Dict[NodeType, Tensor]]
242 MaybeHeteroEdgeTensor = Union[Tensor, Dict[EdgeType, Tensor]]
243
244 # Types for sampling ##########################################################
245
246 InputNodes = Union[OptTensor, NodeType, Tuple[NodeType, OptTensor]]
247 InputEdges = Union[OptTensor, EdgeType, Tuple[EdgeType, OptTensor]]
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py
--- a/torch_geometric/typing.py
+++ b/torch_geometric/typing.py
@@ -105,6 +105,11 @@
) -> 'SparseTensor':
raise ImportError("'SparseTensor' requires 'torch-sparse'")
+ @classmethod
+ def from_dense(self, mat: Tensor,
+ has_value: bool = True) -> 'SparseTensor':
+ raise ImportError("'SparseTensor' requires 'torch-sparse'")
+
def size(self, dim: int) -> int:
raise ImportError("'SparseTensor' requires 'torch-sparse'")
|
{"golden_diff": "diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py\n--- a/torch_geometric/typing.py\n+++ b/torch_geometric/typing.py\n@@ -105,6 +105,11 @@\n ) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n \n+ @classmethod\n+ def from_dense(self, mat: Tensor,\n+ has_value: bool = True) -> 'SparseTensor':\n+ raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n+\n def size(self, dim: int) -> int:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n", "issue": "Fail to import Nell dataset\n### \ud83d\udc1b Describe the bug\n\nI tried to import Nell data set using NELL class:\r\nfrom torch_geometric.datasets import NELL\r\n\r\ndataset = NELL(root='data/Nell')\r\ndata = dataset[0]\r\n\r\nBut I got the following error message:\r\nTraceback (most recent call last):\r\n File \"c:\\Users\\13466\\Desktop\\USTLab\\LabDoc\\HPCA23\\Nell.py\", line 10, in <module>\r\n dataset = NELL(root='data/Nell')\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\datasets\\nell.py\", line 62, in __init__\r\n super().__init__(root, transform, pre_transform)\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\data\\in_memory_dataset.py\", line 57, in __init__\r\n super().__init__(root, transform, pre_transform, pre_filter, log)\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\data\\dataset.py\", line 97, in __init__\r\n self._process()\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\data\\dataset.py\", line 230, in _process\r\n self.process()\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\datasets\\nell.py\", line 82, in process\r\n data = read_planetoid_data(self.raw_dir, 'nell.0.001')\r\n File \"C:\\Users\\13466\\anaconda3\\lib\\site-packages\\torch_geometric\\io\\planetoid.py\", line 53, in read_planetoid_data\r\n row, col, value = SparseTensor.from_dense(x).coo()\r\nAttributeError: type object 'SparseTensor' has no attribute 'from_dense'\r\n\r\n\n\n### Environment\n\n* PyG version:2.3.1\r\n* PyTorch version:2.0.1\r\n* OS:Windows 11\r\n* Python version:3.10\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):pip\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import warnings\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nWITH_PT2 = int(torch.__version__.split('.')[0]) >= 2\n\ntry:\n import pyg_lib # noqa\n WITH_PYG_LIB = True\n WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul')\n WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add')\n WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort')\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'pyg-lib'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n pyg_lib = object\n WITH_PYG_LIB = False\n WITH_GMM = False\n WITH_SAMPLED_OP = False\n WITH_INDEX_SORT = False\n\ntry:\n import torch_scatter # noqa\n WITH_TORCH_SCATTER = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-scatter'. \"\n f\"Disabling its usage. 
Stacktrace: {e}\")\n torch_scatter = object\n WITH_TORCH_SCATTER = False\n\ntry:\n import torch_cluster # noqa\n WITH_TORCH_CLUSTER = True\n WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-cluster'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n WITH_TORCH_CLUSTER = False\n\ntry:\n import torch_spline_conv # noqa\n WITH_TORCH_SPLINE_CONV = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(\n f\"An issue occurred while importing 'torch-spline-conv'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n WITH_TORCH_SPLINE_CONV = False\n\ntry:\n import torch_sparse # noqa\n from torch_sparse import SparseStorage, SparseTensor\n WITH_TORCH_SPARSE = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-sparse'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n WITH_TORCH_SPARSE = False\n\n class SparseStorage:\n def __init__(\n self,\n row: Optional[Tensor] = None,\n rowptr: Optional[Tensor] = None,\n col: Optional[Tensor] = None,\n value: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n rowcount: Optional[Tensor] = None,\n colptr: Optional[Tensor] = None,\n colcount: Optional[Tensor] = None,\n csr2csc: Optional[Tensor] = None,\n csc2csr: Optional[Tensor] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ):\n raise ImportError(\"'SparseStorage' requires 'torch-sparse'\")\n\n class SparseTensor:\n def __init__(\n self,\n row: Optional[Tensor] = None,\n rowptr: Optional[Tensor] = None,\n col: Optional[Tensor] = None,\n value: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ):\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n @classmethod\n def from_edge_index(\n self,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def size(self, dim: int) -> int:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def is_cuda(self) -> bool:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def has_value(self) -> bool:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def set_value(self, value: Optional[Tensor],\n layout: Optional[str] = None) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def fill_value(self, fill_value: float,\n dtype: Optional[torch.dtype] = None) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def to_torch_sparse_csr_tensor(\n self,\n dtype: Optional[torch.dtype] = None,\n ) -> Tensor:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n class torch_sparse:\n @staticmethod\n def matmul(src: SparseTensor, other: Tensor,\n reduce: str = \"sum\") -> Tensor:\n raise ImportError(\"'matmul' requires 'torch-sparse'\")\n\n @staticmethod\n def 
sum(src: SparseTensor, dim: Optional[int] = None) -> Tensor:\n raise ImportError(\"'sum' requires 'torch-sparse'\")\n\n @staticmethod\n def mul(src: SparseTensor, other: Tensor) -> SparseTensor:\n raise ImportError(\"'mul' requires 'torch-sparse'\")\n\n @staticmethod\n def set_diag(src: SparseTensor, values: Optional[Tensor] = None,\n k: int = 0) -> SparseTensor:\n raise ImportError(\"'set_diag' requires 'torch-sparse'\")\n\n @staticmethod\n def fill_diag(src: SparseTensor, fill_value: float,\n k: int = 0) -> SparseTensor:\n raise ImportError(\"'fill_diag' requires 'torch-sparse'\")\n\n @staticmethod\n def masked_select_nnz(src: SparseTensor, mask: Tensor,\n layout: Optional[str] = None) -> SparseTensor:\n raise ImportError(\"'masked_select_nnz' requires 'torch-sparse'\")\n\n\n# Types for accessing data ####################################################\n\n# Node-types are denoted by a single string, e.g.: `data['paper']`:\nNodeType = str\n\n# Edge-types are denotes by a triplet of strings, e.g.:\n# `data[('author', 'writes', 'paper')]\nEdgeType = Tuple[str, str, str]\n\nDEFAULT_REL = 'to'\nEDGE_TYPE_STR_SPLIT = '__'\n\n\nclass EdgeTypeStr(str):\n r\"\"\"A helper class to construct serializable edge types by merging an edge\n type tuple into a single string.\"\"\"\n def __new__(cls, *args):\n if isinstance(args[0], (list, tuple)):\n # Unwrap `EdgeType((src, rel, dst))` and `EdgeTypeStr((src, dst))`:\n args = tuple(args[0])\n\n if len(args) == 1 and isinstance(args[0], str):\n args = args[0] # An edge type string was passed.\n\n elif len(args) == 2 and all(isinstance(arg, str) for arg in args):\n # A `(src, dst)` edge type was passed - add `DEFAULT_REL`:\n args = (args[0], DEFAULT_REL, args[1])\n args = EDGE_TYPE_STR_SPLIT.join(args)\n\n elif len(args) == 3 and all(isinstance(arg, str) for arg in args):\n # A `(src, rel, dst)` edge type was passed:\n args = EDGE_TYPE_STR_SPLIT.join(args)\n\n else:\n raise ValueError(f\"Encountered invalid edge type '{args}'\")\n\n return str.__new__(cls, args)\n\n def to_tuple(self) -> EdgeType:\n r\"\"\"Returns the original edge type.\"\"\"\n out = tuple(self.split(EDGE_TYPE_STR_SPLIT))\n if len(out) != 3:\n raise ValueError(f\"Cannot convert the edge type '{self}' to a \"\n f\"tuple since it holds invalid characters\")\n return out\n\n\n# There exist some short-cuts to query edge-types (given that the full triplet\n# can be uniquely reconstructed, e.g.:\n# * via str: `data['writes']`\n# * via Tuple[str, str]: `data[('author', 'paper')]`\nQueryType = Union[NodeType, EdgeType, str, Tuple[str, str]]\n\nMetadata = Tuple[List[NodeType], List[EdgeType]]\n\n# A representation of a feature tensor\nFeatureTensorType = Union[Tensor, np.ndarray]\n\n# A representation of an edge index, following the possible formats:\n# * COO: (row, col)\n# * CSC: (row, colptr)\n# * CSR: (rowptr, col)\nEdgeTensorType = Tuple[Tensor, Tensor]\n\n# Types for message passing ###################################################\n\nAdj = Union[Tensor, SparseTensor]\nOptTensor = Optional[Tensor]\nPairTensor = Tuple[Tensor, Tensor]\nOptPairTensor = Tuple[Tensor, Optional[Tensor]]\nPairOptTensor = Tuple[Optional[Tensor], Optional[Tensor]]\nSize = Optional[Tuple[int, int]]\nNoneType = Optional[Tensor]\n\nMaybeHeteroNodeTensor = Union[Tensor, Dict[NodeType, Tensor]]\nMaybeHeteroEdgeTensor = Union[Tensor, Dict[EdgeType, Tensor]]\n\n# Types for sampling ##########################################################\n\nInputNodes = Union[OptTensor, NodeType, Tuple[NodeType, OptTensor]]\nInputEdges = 
Union[OptTensor, EdgeType, Tuple[EdgeType, OptTensor]]\n", "path": "torch_geometric/typing.py"}], "after_files": [{"content": "import warnings\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nWITH_PT2 = int(torch.__version__.split('.')[0]) >= 2\n\ntry:\n import pyg_lib # noqa\n WITH_PYG_LIB = True\n WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul')\n WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add')\n WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort')\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'pyg-lib'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n pyg_lib = object\n WITH_PYG_LIB = False\n WITH_GMM = False\n WITH_SAMPLED_OP = False\n WITH_INDEX_SORT = False\n\ntry:\n import torch_scatter # noqa\n WITH_TORCH_SCATTER = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-scatter'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n torch_scatter = object\n WITH_TORCH_SCATTER = False\n\ntry:\n import torch_cluster # noqa\n WITH_TORCH_CLUSTER = True\n WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-cluster'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n WITH_TORCH_CLUSTER = False\n\ntry:\n import torch_spline_conv # noqa\n WITH_TORCH_SPLINE_CONV = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(\n f\"An issue occurred while importing 'torch-spline-conv'. \"\n f\"Disabling its usage. Stacktrace: {e}\")\n WITH_TORCH_SPLINE_CONV = False\n\ntry:\n import torch_sparse # noqa\n from torch_sparse import SparseStorage, SparseTensor\n WITH_TORCH_SPARSE = True\nexcept (ImportError, OSError) as e:\n if isinstance(e, OSError):\n warnings.warn(f\"An issue occurred while importing 'torch-sparse'. \"\n f\"Disabling its usage. 
Stacktrace: {e}\")\n WITH_TORCH_SPARSE = False\n\n class SparseStorage:\n def __init__(\n self,\n row: Optional[Tensor] = None,\n rowptr: Optional[Tensor] = None,\n col: Optional[Tensor] = None,\n value: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n rowcount: Optional[Tensor] = None,\n colptr: Optional[Tensor] = None,\n colcount: Optional[Tensor] = None,\n csr2csc: Optional[Tensor] = None,\n csc2csr: Optional[Tensor] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ):\n raise ImportError(\"'SparseStorage' requires 'torch-sparse'\")\n\n class SparseTensor:\n def __init__(\n self,\n row: Optional[Tensor] = None,\n rowptr: Optional[Tensor] = None,\n col: Optional[Tensor] = None,\n value: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ):\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n @classmethod\n def from_edge_index(\n self,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None,\n is_sorted: bool = False,\n trust_data: bool = False,\n ) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n @classmethod\n def from_dense(self, mat: Tensor,\n has_value: bool = True) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def size(self, dim: int) -> int:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def is_cuda(self) -> bool:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def has_value(self) -> bool:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def set_value(self, value: Optional[Tensor],\n layout: Optional[str] = None) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def fill_value(self, fill_value: float,\n dtype: Optional[torch.dtype] = None) -> 'SparseTensor':\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n def to_torch_sparse_csr_tensor(\n self,\n dtype: Optional[torch.dtype] = None,\n ) -> Tensor:\n raise ImportError(\"'SparseTensor' requires 'torch-sparse'\")\n\n class torch_sparse:\n @staticmethod\n def matmul(src: SparseTensor, other: Tensor,\n reduce: str = \"sum\") -> Tensor:\n raise ImportError(\"'matmul' requires 'torch-sparse'\")\n\n @staticmethod\n def sum(src: SparseTensor, dim: Optional[int] = None) -> Tensor:\n raise ImportError(\"'sum' requires 'torch-sparse'\")\n\n @staticmethod\n def mul(src: SparseTensor, other: Tensor) -> SparseTensor:\n raise ImportError(\"'mul' requires 'torch-sparse'\")\n\n @staticmethod\n def set_diag(src: SparseTensor, values: Optional[Tensor] = None,\n k: int = 0) -> SparseTensor:\n raise ImportError(\"'set_diag' requires 'torch-sparse'\")\n\n @staticmethod\n def fill_diag(src: SparseTensor, fill_value: float,\n k: int = 0) -> SparseTensor:\n raise ImportError(\"'fill_diag' requires 'torch-sparse'\")\n\n @staticmethod\n def masked_select_nnz(src: SparseTensor, mask: Tensor,\n layout: Optional[str] = None) -> SparseTensor:\n raise ImportError(\"'masked_select_nnz' requires 'torch-sparse'\")\n\n\n# Types for accessing data 
####################################################\n\n# Node-types are denoted by a single string, e.g.: `data['paper']`:\nNodeType = str\n\n# Edge-types are denotes by a triplet of strings, e.g.:\n# `data[('author', 'writes', 'paper')]\nEdgeType = Tuple[str, str, str]\n\nDEFAULT_REL = 'to'\nEDGE_TYPE_STR_SPLIT = '__'\n\n\nclass EdgeTypeStr(str):\n r\"\"\"A helper class to construct serializable edge types by merging an edge\n type tuple into a single string.\"\"\"\n def __new__(cls, *args):\n if isinstance(args[0], (list, tuple)):\n # Unwrap `EdgeType((src, rel, dst))` and `EdgeTypeStr((src, dst))`:\n args = tuple(args[0])\n\n if len(args) == 1 and isinstance(args[0], str):\n args = args[0] # An edge type string was passed.\n\n elif len(args) == 2 and all(isinstance(arg, str) for arg in args):\n # A `(src, dst)` edge type was passed - add `DEFAULT_REL`:\n args = (args[0], DEFAULT_REL, args[1])\n args = EDGE_TYPE_STR_SPLIT.join(args)\n\n elif len(args) == 3 and all(isinstance(arg, str) for arg in args):\n # A `(src, rel, dst)` edge type was passed:\n args = EDGE_TYPE_STR_SPLIT.join(args)\n\n else:\n raise ValueError(f\"Encountered invalid edge type '{args}'\")\n\n return str.__new__(cls, args)\n\n def to_tuple(self) -> EdgeType:\n r\"\"\"Returns the original edge type.\"\"\"\n out = tuple(self.split(EDGE_TYPE_STR_SPLIT))\n if len(out) != 3:\n raise ValueError(f\"Cannot convert the edge type '{self}' to a \"\n f\"tuple since it holds invalid characters\")\n return out\n\n\n# There exist some short-cuts to query edge-types (given that the full triplet\n# can be uniquely reconstructed, e.g.:\n# * via str: `data['writes']`\n# * via Tuple[str, str]: `data[('author', 'paper')]`\nQueryType = Union[NodeType, EdgeType, str, Tuple[str, str]]\n\nMetadata = Tuple[List[NodeType], List[EdgeType]]\n\n# A representation of a feature tensor\nFeatureTensorType = Union[Tensor, np.ndarray]\n\n# A representation of an edge index, following the possible formats:\n# * COO: (row, col)\n# * CSC: (row, colptr)\n# * CSR: (rowptr, col)\nEdgeTensorType = Tuple[Tensor, Tensor]\n\n# Types for message passing ###################################################\n\nAdj = Union[Tensor, SparseTensor]\nOptTensor = Optional[Tensor]\nPairTensor = Tuple[Tensor, Tensor]\nOptPairTensor = Tuple[Tensor, Optional[Tensor]]\nPairOptTensor = Tuple[Optional[Tensor], Optional[Tensor]]\nSize = Optional[Tuple[int, int]]\nNoneType = Optional[Tensor]\n\nMaybeHeteroNodeTensor = Union[Tensor, Dict[NodeType, Tensor]]\nMaybeHeteroEdgeTensor = Union[Tensor, Dict[EdgeType, Tensor]]\n\n# Types for sampling ##########################################################\n\nInputNodes = Union[OptTensor, NodeType, Tuple[NodeType, OptTensor]]\nInputEdges = Union[OptTensor, EdgeType, Tuple[EdgeType, OptTensor]]\n", "path": "torch_geometric/typing.py"}]}
| 3,614 | 146 |
gh_patches_debug_35133 | rasdani/github-patches | git_diff | cowrie__cowrie-1472 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MalShare uploader not working
**Describe the bug**
In my config I have
```
[output_malshare]
enabled = true
```
and in my logs I have
```
[stdout#info] Sending file to MalShare
[stdout#info] Submited to MalShare
```
but when I check on MalShare I can't find any the binaries that have been caught in my honeypot.
**To Reproduce**
Steps to reproduce the behavior:
1. Enable MalShare submission in your config
2. Wait for a bot to drop a binary in your honeypot
3. Try to find the binary on malshare (search by md5)
4. Observe that the binary is not there
**Expected behavior**
The binary should be uploaded successfully to MalShare
**Server (please complete the following information):**
- OS: [e.g. RedHat Linux 7.1, output of uname -a] Ubuntu 20.04, Linux 5.4.0
- Python: 3.8.5
**Additional context**
Based on [MalShare's API docs](https://malshare.com/doc.php) it seems that uploading files now requires an API key and a slightly different POST path than the one [defined in cowrie](https://github.com/cowrie/cowrie/blob/b848ec261554ee9128640601eb9a6734b2bffefe/src/cowrie/output/malshare.py#L90). Probably adding an API key option to the config and updating the uploader with the new path and to use the API key will solve this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/output/malshare.py`
Content:
```
1 # Copyright (c) 2015 Michel Oosterhof <[email protected]>
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions
6 # are met:
7 #
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
13 # 3. The names of the author(s) may not be used to endorse or promote
14 # products derived from this software without specific prior written
15 # permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS`` AND ANY EXPRESS OR
18 # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 # SUCH DAMAGE.
28
29 """
30 Send files to https://malshare.com/
31 More info https://malshare.com/doc.php
32 """
33
34 from __future__ import absolute_import, division
35
36 import os
37
38 try:
39 from urllib.parse import urlparse
40 except ImportError:
41 from urlparse import urlparse
42 import requests
43
44 import cowrie.core.output
45
46
47 class Output(cowrie.core.output.Output):
48 """
49 malshare output
50
51 TODO: use `treq`
52 """
53 def start(self):
54 """
55 Start output plugin
56 """
57 pass
58
59 def stop(self):
60 """
61 Stop output plugin
62 """
63 pass
64
65 def write(self, entry):
66 if entry["eventid"] == "cowrie.session.file_download":
67 print("Sending file to MalShare")
68 p = urlparse(entry["url"]).path
69 if p == "":
70 fileName = entry["shasum"]
71 else:
72 b = os.path.basename(p)
73 if b == "":
74 fileName = entry["shasum"]
75 else:
76 fileName = b
77
78 self.postfile(entry["outfile"], fileName)
79
80 elif entry["eventid"] == "cowrie.session.file_upload":
81 print("Sending file to MalShare")
82 self.postfile(entry["outfile"], entry["filename"])
83
84 def postfile(self, artifact, fileName):
85 """
86 Send a file to MalShare
87 """
88 try:
89 res = requests.post(
90 "https://malshare.com/api.php?mode=cli",
91 files={fileName: open(artifact, "rb")}
92 )
93 if res and res.ok:
94 print("Submited to MalShare")
95 else:
96 print("MalShare Request failed: {}".format(res.status_code))
97 except Exception as e:
98 print("MalShare Request failed: {}".format(e))
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/output/malshare.py b/src/cowrie/output/malshare.py
--- a/src/cowrie/output/malshare.py
+++ b/src/cowrie/output/malshare.py
@@ -41,7 +41,10 @@
from urlparse import urlparse
import requests
+from twisted.python import log
+
import cowrie.core.output
+from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
@@ -54,7 +57,7 @@
"""
Start output plugin
"""
- pass
+ self.apiKey = CowrieConfig().get('output_malshare', 'api_key')
def stop(self):
"""
@@ -64,7 +67,6 @@
def write(self, entry):
if entry["eventid"] == "cowrie.session.file_download":
- print("Sending file to MalShare")
p = urlparse(entry["url"]).path
if p == "":
fileName = entry["shasum"]
@@ -78,7 +80,6 @@
self.postfile(entry["outfile"], fileName)
elif entry["eventid"] == "cowrie.session.file_upload":
- print("Sending file to MalShare")
self.postfile(entry["outfile"], entry["filename"])
def postfile(self, artifact, fileName):
@@ -87,12 +88,12 @@
"""
try:
res = requests.post(
- "https://malshare.com/api.php?mode=cli",
- files={fileName: open(artifact, "rb")}
+ "https://malshare.com/api.php?api_key="+self.apiKey+"&action=upload",
+ files={"upload": open(artifact, "rb")}
)
if res and res.ok:
- print("Submited to MalShare")
+ log.msg("Submitted to MalShare")
else:
- print("MalShare Request failed: {}".format(res.status_code))
+ log.msg("MalShare Request failed: {}".format(res.status_code))
except Exception as e:
- print("MalShare Request failed: {}".format(e))
+ log.msg("MalShare Request failed: {}".format(e))
|
{"golden_diff": "diff --git a/src/cowrie/output/malshare.py b/src/cowrie/output/malshare.py\n--- a/src/cowrie/output/malshare.py\n+++ b/src/cowrie/output/malshare.py\n@@ -41,7 +41,10 @@\n from urlparse import urlparse\n import requests\n \n+from twisted.python import log\n+\n import cowrie.core.output\n+from cowrie.core.config import CowrieConfig\n \n \n class Output(cowrie.core.output.Output):\n@@ -54,7 +57,7 @@\n \"\"\"\n Start output plugin\n \"\"\"\n- pass\n+ self.apiKey = CowrieConfig().get('output_malshare', 'api_key')\n \n def stop(self):\n \"\"\"\n@@ -64,7 +67,6 @@\n \n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.file_download\":\n- print(\"Sending file to MalShare\")\n p = urlparse(entry[\"url\"]).path\n if p == \"\":\n fileName = entry[\"shasum\"]\n@@ -78,7 +80,6 @@\n self.postfile(entry[\"outfile\"], fileName)\n \n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n- print(\"Sending file to MalShare\")\n self.postfile(entry[\"outfile\"], entry[\"filename\"])\n \n def postfile(self, artifact, fileName):\n@@ -87,12 +88,12 @@\n \"\"\"\n try:\n res = requests.post(\n- \"https://malshare.com/api.php?mode=cli\",\n- files={fileName: open(artifact, \"rb\")}\n+ \"https://malshare.com/api.php?api_key=\"+self.apiKey+\"&action=upload\",\n+ files={\"upload\": open(artifact, \"rb\")}\n )\n if res and res.ok:\n- print(\"Submited to MalShare\")\n+ log.msg(\"Submitted to MalShare\")\n else:\n- print(\"MalShare Request failed: {}\".format(res.status_code))\n+ log.msg(\"MalShare Request failed: {}\".format(res.status_code))\n except Exception as e:\n- print(\"MalShare Request failed: {}\".format(e))\n+ log.msg(\"MalShare Request failed: {}\".format(e))\n", "issue": "MalShare uploader not working\n**Describe the bug**\r\nIn my config I have\r\n```\r\n[output_malshare]\r\nenabled = true\r\n```\r\n\r\nand in my logs I have\r\n```\r\n[stdout#info] Sending file to MalShare\r\n[stdout#info] Submited to MalShare\r\n```\r\n\r\nbut when I check on MalShare I can't find any the binaries that have been caught in my honeypot.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Enable MalShare submission in your config\r\n2. Wait for a bot to drop a binary in your honeypot\r\n3. Try to find the binary on malshare (search by md5)\r\n4. Observe that the binary is not there\r\n\r\n**Expected behavior**\r\nThe binary should be uploaded successfully to MalShare\r\n\r\n**Server (please complete the following information):**\r\n - OS: [e.g. RedHat Linux 7.1, output of uname -a] Ubuntu 20.04, Linux 5.4.0\r\n - Python: 3.8.5\r\n\r\n**Additional context**\r\nBased on [MalShare's API docs](https://malshare.com/doc.php) it seems that uploading files now requires an API key and a slightly different POST path than the one [defined in cowrie](https://github.com/cowrie/cowrie/blob/b848ec261554ee9128640601eb9a6734b2bffefe/src/cowrie/output/malshare.py#L90). Probably adding an API key option to the config and updating the uploader with the new path and to use the API key will solve this.\r\n\n", "before_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS`` AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n\"\"\"\nSend files to https://malshare.com/\nMore info https://malshare.com/doc.php\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\nimport requests\n\nimport cowrie.core.output\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n malshare output\n\n TODO: use `treq`\n \"\"\"\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n pass\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.file_download\":\n print(\"Sending file to MalShare\")\n p = urlparse(entry[\"url\"]).path\n if p == \"\":\n fileName = entry[\"shasum\"]\n else:\n b = os.path.basename(p)\n if b == \"\":\n fileName = entry[\"shasum\"]\n else:\n fileName = b\n\n self.postfile(entry[\"outfile\"], fileName)\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n print(\"Sending file to MalShare\")\n self.postfile(entry[\"outfile\"], entry[\"filename\"])\n\n def postfile(self, artifact, fileName):\n \"\"\"\n Send a file to MalShare\n \"\"\"\n try:\n res = requests.post(\n \"https://malshare.com/api.php?mode=cli\",\n files={fileName: open(artifact, \"rb\")}\n )\n if res and res.ok:\n print(\"Submited to MalShare\")\n else:\n print(\"MalShare Request failed: {}\".format(res.status_code))\n except Exception as e:\n print(\"MalShare Request failed: {}\".format(e))\n", "path": "src/cowrie/output/malshare.py"}], "after_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS`` AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n\"\"\"\nSend files to https://malshare.com/\nMore info https://malshare.com/doc.php\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\nimport requests\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n malshare output\n\n TODO: use `treq`\n \"\"\"\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n self.apiKey = CowrieConfig().get('output_malshare', 'api_key')\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.file_download\":\n p = urlparse(entry[\"url\"]).path\n if p == \"\":\n fileName = entry[\"shasum\"]\n else:\n b = os.path.basename(p)\n if b == \"\":\n fileName = entry[\"shasum\"]\n else:\n fileName = b\n\n self.postfile(entry[\"outfile\"], fileName)\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n self.postfile(entry[\"outfile\"], entry[\"filename\"])\n\n def postfile(self, artifact, fileName):\n \"\"\"\n Send a file to MalShare\n \"\"\"\n try:\n res = requests.post(\n \"https://malshare.com/api.php?api_key=\"+self.apiKey+\"&action=upload\",\n files={\"upload\": open(artifact, \"rb\")}\n )\n if res and res.ok:\n log.msg(\"Submitted to MalShare\")\n else:\n log.msg(\"MalShare Request failed: {}\".format(res.status_code))\n except Exception as e:\n log.msg(\"MalShare Request failed: {}\".format(e))\n", "path": "src/cowrie/output/malshare.py"}]}
| 1,504 | 484 |
gh_patches_debug_7192
|
rasdani/github-patches
|
git_diff
|
aio-libs__aiohttp-649
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aiohttp filtering out "Authorization" header
Apparently aiohttp is filtering out the "Authorization" header in aiohttp.wsgi:69 in create_wsgi_environ.
This bug was found while using aiopyramid + jwtauth, you can find more details (and an example project) on https://github.com/housleyjk/aiopyramid/issues/14
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/wsgi.py`
Content:
```
1 """wsgi server.
2
3 TODO:
4 * proxy protocol
5 * x-forward security
6 * wsgi file support (os.sendfile)
7 """
8
9 import asyncio
10 import inspect
11 import io
12 import os
13 import sys
14 from urllib.parse import urlsplit
15
16 import aiohttp
17 from aiohttp import server, hdrs
18
19 __all__ = ('WSGIServerHttpProtocol',)
20
21
22 class WSGIServerHttpProtocol(server.ServerHttpProtocol):
23 """HTTP Server that implements the Python WSGI protocol.
24
25 It uses 'wsgi.async' of 'True'. 'wsgi.input' can behave differently
26 depends on 'readpayload' constructor parameter. If readpayload is set to
27 True, wsgi server reads all incoming data into BytesIO object and
28 sends it as 'wsgi.input' environ var. If readpayload is set to false
29 'wsgi.input' is a StreamReader and application should read incoming
30 data with "yield from environ['wsgi.input'].read()". It defaults to False.
31 """
32
33 SCRIPT_NAME = os.environ.get('SCRIPT_NAME', '')
34
35 def __init__(self, app, readpayload=False, is_ssl=False, *args, **kw):
36 super().__init__(*args, **kw)
37
38 self.wsgi = app
39 self.is_ssl = is_ssl
40 self.readpayload = readpayload
41
42 def create_wsgi_response(self, message):
43 return WsgiResponse(self.writer, message)
44
45 def create_wsgi_environ(self, message, payload):
46 uri_parts = urlsplit(message.path)
47 url_scheme = 'https' if self.is_ssl else 'http'
48
49 environ = {
50 'wsgi.input': payload,
51 'wsgi.errors': sys.stderr,
52 'wsgi.version': (1, 0),
53 'wsgi.async': True,
54 'wsgi.multithread': False,
55 'wsgi.multiprocess': False,
56 'wsgi.run_once': False,
57 'wsgi.file_wrapper': FileWrapper,
58 'wsgi.url_scheme': url_scheme,
59 'SERVER_SOFTWARE': aiohttp.HttpMessage.SERVER_SOFTWARE,
60 'REQUEST_METHOD': message.method,
61 'QUERY_STRING': uri_parts.query or '',
62 'RAW_URI': message.path,
63 'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version
64 }
65
66 script_name = self.SCRIPT_NAME
67
68 for hdr_name, hdr_value in message.headers.items():
69 if hdr_name == 'AUTHORIZATION':
70 continue
71 elif hdr_name == 'SCRIPT_NAME':
72 script_name = hdr_value
73 elif hdr_name == 'CONTENT-TYPE':
74 environ['CONTENT_TYPE'] = hdr_value
75 continue
76 elif hdr_name == 'CONTENT-LENGTH':
77 environ['CONTENT_LENGTH'] = hdr_value
78 continue
79
80 key = 'HTTP_%s' % hdr_name.replace('-', '_')
81 if key in environ:
82 hdr_value = '%s,%s' % (environ[key], hdr_value)
83
84 environ[key] = hdr_value
85
86 # authors should be aware that REMOTE_HOST and REMOTE_ADDR
87 # may not qualify the remote addr
88 # also SERVER_PORT variable MUST be set to the TCP/IP port number on
89 # which this request is received from the client.
90 # http://www.ietf.org/rfc/rfc3875
91
92 remote = self.transport.get_extra_info('peername')
93 environ['REMOTE_ADDR'] = remote[0]
94 environ['REMOTE_PORT'] = remote[1]
95
96 sockname = self.transport.get_extra_info('sockname')
97 environ['SERVER_PORT'] = str(sockname[1])
98 host = message.headers.get("HOST", None)
99 if host:
100 environ['SERVER_NAME'] = host.split(":")[0]
101 else:
102 environ['SERVER_NAME'] = sockname[0]
103
104 path_info = uri_parts.path
105 if script_name:
106 path_info = path_info.split(script_name, 1)[-1]
107
108 environ['PATH_INFO'] = path_info
109 environ['SCRIPT_NAME'] = script_name
110
111 environ['async.reader'] = self.reader
112 environ['async.writer'] = self.writer
113
114 return environ
115
116 @asyncio.coroutine
117 def handle_request(self, message, payload):
118 """Handle a single HTTP request"""
119 now = self._loop.time()
120
121 if self.readpayload:
122 wsgiinput = io.BytesIO()
123 wsgiinput.write((yield from payload.read()))
124 wsgiinput.seek(0)
125 payload = wsgiinput
126
127 environ = self.create_wsgi_environ(message, payload)
128 response = self.create_wsgi_response(message)
129
130 riter = self.wsgi(environ, response.start_response)
131 if isinstance(riter, asyncio.Future) or inspect.isgenerator(riter):
132 riter = yield from riter
133
134 resp = response.response
135 try:
136 for item in riter:
137 if isinstance(item, asyncio.Future):
138 item = yield from item
139 yield from resp.write(item)
140
141 yield from resp.write_eof()
142 finally:
143 if hasattr(riter, 'close'):
144 riter.close()
145
146 if resp.keep_alive():
147 self.keep_alive(True)
148
149 self.log_access(
150 message, environ, response.response, self._loop.time() - now)
151
152
153 class FileWrapper:
154 """Custom file wrapper."""
155
156 def __init__(self, fobj, chunk_size=8192):
157 self.fobj = fobj
158 self.chunk_size = chunk_size
159 if hasattr(fobj, 'close'):
160 self.close = fobj.close
161
162 def __iter__(self):
163 return self
164
165 def __next__(self):
166 data = self.fobj.read(self.chunk_size)
167 if data:
168 return data
169 raise StopIteration
170
171
172 class WsgiResponse:
173 """Implementation of start_response() callable as specified by PEP 3333"""
174
175 status = None
176
177 HOP_HEADERS = {
178 hdrs.CONNECTION,
179 hdrs.KEEP_ALIVE,
180 hdrs.PROXY_AUTHENTICATE,
181 hdrs.PROXY_AUTHORIZATION,
182 hdrs.TE,
183 hdrs.TRAILER,
184 hdrs.TRANSFER_ENCODING,
185 hdrs.UPGRADE,
186 }
187
188 def __init__(self, writer, message):
189 self.writer = writer
190 self.message = message
191
192 def start_response(self, status, headers, exc_info=None):
193 if exc_info:
194 try:
195 if self.status:
196 raise exc_info[1]
197 finally:
198 exc_info = None
199
200 status_code = int(status.split(' ', 1)[0])
201
202 self.status = status
203 resp = self.response = aiohttp.Response(
204 self.writer, status_code,
205 self.message.version, self.message.should_close)
206 resp.HOP_HEADERS = self.HOP_HEADERS
207 resp.add_headers(*headers)
208
209 if resp.has_chunked_hdr:
210 resp.enable_chunked_encoding()
211
212 # send headers immediately for websocket connection
213 if status_code == 101 and resp.upgrade and resp.websocket:
214 resp.send_headers()
215 else:
216 resp._send_headers = True
217 return self.response.write
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/aiohttp/wsgi.py b/aiohttp/wsgi.py
--- a/aiohttp/wsgi.py
+++ b/aiohttp/wsgi.py
@@ -66,9 +66,7 @@
script_name = self.SCRIPT_NAME
for hdr_name, hdr_value in message.headers.items():
- if hdr_name == 'AUTHORIZATION':
- continue
- elif hdr_name == 'SCRIPT_NAME':
+ if hdr_name == 'SCRIPT_NAME':
script_name = hdr_value
elif hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
|
{"golden_diff": "diff --git a/aiohttp/wsgi.py b/aiohttp/wsgi.py\n--- a/aiohttp/wsgi.py\n+++ b/aiohttp/wsgi.py\n@@ -66,9 +66,7 @@\n script_name = self.SCRIPT_NAME\n \n for hdr_name, hdr_value in message.headers.items():\n- if hdr_name == 'AUTHORIZATION':\n- continue\n- elif hdr_name == 'SCRIPT_NAME':\n+ if hdr_name == 'SCRIPT_NAME':\n script_name = hdr_value\n elif hdr_name == 'CONTENT-TYPE':\n environ['CONTENT_TYPE'] = hdr_value\n", "issue": "aiohttp filtering out \"Authorization\" header\nApparently aiohttp is filtering out the \"Authorization\" header in aiohttp.wsgi:69 in create_wsgi_environ.\n\nThis bug was found while using aiopyramid + jwtauth, you can find more details (and an example project) on https://github.com/housleyjk/aiopyramid/issues/14\n\n", "before_files": [{"content": "\"\"\"wsgi server.\n\nTODO:\n * proxy protocol\n * x-forward security\n * wsgi file support (os.sendfile)\n\"\"\"\n\nimport asyncio\nimport inspect\nimport io\nimport os\nimport sys\nfrom urllib.parse import urlsplit\n\nimport aiohttp\nfrom aiohttp import server, hdrs\n\n__all__ = ('WSGIServerHttpProtocol',)\n\n\nclass WSGIServerHttpProtocol(server.ServerHttpProtocol):\n \"\"\"HTTP Server that implements the Python WSGI protocol.\n\n It uses 'wsgi.async' of 'True'. 'wsgi.input' can behave differently\n depends on 'readpayload' constructor parameter. If readpayload is set to\n True, wsgi server reads all incoming data into BytesIO object and\n sends it as 'wsgi.input' environ var. If readpayload is set to false\n 'wsgi.input' is a StreamReader and application should read incoming\n data with \"yield from environ['wsgi.input'].read()\". It defaults to False.\n \"\"\"\n\n SCRIPT_NAME = os.environ.get('SCRIPT_NAME', '')\n\n def __init__(self, app, readpayload=False, is_ssl=False, *args, **kw):\n super().__init__(*args, **kw)\n\n self.wsgi = app\n self.is_ssl = is_ssl\n self.readpayload = readpayload\n\n def create_wsgi_response(self, message):\n return WsgiResponse(self.writer, message)\n\n def create_wsgi_environ(self, message, payload):\n uri_parts = urlsplit(message.path)\n url_scheme = 'https' if self.is_ssl else 'http'\n\n environ = {\n 'wsgi.input': payload,\n 'wsgi.errors': sys.stderr,\n 'wsgi.version': (1, 0),\n 'wsgi.async': True,\n 'wsgi.multithread': False,\n 'wsgi.multiprocess': False,\n 'wsgi.run_once': False,\n 'wsgi.file_wrapper': FileWrapper,\n 'wsgi.url_scheme': url_scheme,\n 'SERVER_SOFTWARE': aiohttp.HttpMessage.SERVER_SOFTWARE,\n 'REQUEST_METHOD': message.method,\n 'QUERY_STRING': uri_parts.query or '',\n 'RAW_URI': message.path,\n 'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version\n }\n\n script_name = self.SCRIPT_NAME\n\n for hdr_name, hdr_value in message.headers.items():\n if hdr_name == 'AUTHORIZATION':\n continue\n elif hdr_name == 'SCRIPT_NAME':\n script_name = hdr_value\n elif hdr_name == 'CONTENT-TYPE':\n environ['CONTENT_TYPE'] = hdr_value\n continue\n elif hdr_name == 'CONTENT-LENGTH':\n environ['CONTENT_LENGTH'] = hdr_value\n continue\n\n key = 'HTTP_%s' % hdr_name.replace('-', '_')\n if key in environ:\n hdr_value = '%s,%s' % (environ[key], hdr_value)\n\n environ[key] = hdr_value\n\n # authors should be aware that REMOTE_HOST and REMOTE_ADDR\n # may not qualify the remote addr\n # also SERVER_PORT variable MUST be set to the TCP/IP port number on\n # which this request is received from the client.\n # http://www.ietf.org/rfc/rfc3875\n\n remote = self.transport.get_extra_info('peername')\n environ['REMOTE_ADDR'] = remote[0]\n environ['REMOTE_PORT'] = remote[1]\n\n sockname = 
self.transport.get_extra_info('sockname')\n environ['SERVER_PORT'] = str(sockname[1])\n host = message.headers.get(\"HOST\", None)\n if host:\n environ['SERVER_NAME'] = host.split(\":\")[0]\n else:\n environ['SERVER_NAME'] = sockname[0]\n\n path_info = uri_parts.path\n if script_name:\n path_info = path_info.split(script_name, 1)[-1]\n\n environ['PATH_INFO'] = path_info\n environ['SCRIPT_NAME'] = script_name\n\n environ['async.reader'] = self.reader\n environ['async.writer'] = self.writer\n\n return environ\n\n @asyncio.coroutine\n def handle_request(self, message, payload):\n \"\"\"Handle a single HTTP request\"\"\"\n now = self._loop.time()\n\n if self.readpayload:\n wsgiinput = io.BytesIO()\n wsgiinput.write((yield from payload.read()))\n wsgiinput.seek(0)\n payload = wsgiinput\n\n environ = self.create_wsgi_environ(message, payload)\n response = self.create_wsgi_response(message)\n\n riter = self.wsgi(environ, response.start_response)\n if isinstance(riter, asyncio.Future) or inspect.isgenerator(riter):\n riter = yield from riter\n\n resp = response.response\n try:\n for item in riter:\n if isinstance(item, asyncio.Future):\n item = yield from item\n yield from resp.write(item)\n\n yield from resp.write_eof()\n finally:\n if hasattr(riter, 'close'):\n riter.close()\n\n if resp.keep_alive():\n self.keep_alive(True)\n\n self.log_access(\n message, environ, response.response, self._loop.time() - now)\n\n\nclass FileWrapper:\n \"\"\"Custom file wrapper.\"\"\"\n\n def __init__(self, fobj, chunk_size=8192):\n self.fobj = fobj\n self.chunk_size = chunk_size\n if hasattr(fobj, 'close'):\n self.close = fobj.close\n\n def __iter__(self):\n return self\n\n def __next__(self):\n data = self.fobj.read(self.chunk_size)\n if data:\n return data\n raise StopIteration\n\n\nclass WsgiResponse:\n \"\"\"Implementation of start_response() callable as specified by PEP 3333\"\"\"\n\n status = None\n\n HOP_HEADERS = {\n hdrs.CONNECTION,\n hdrs.KEEP_ALIVE,\n hdrs.PROXY_AUTHENTICATE,\n hdrs.PROXY_AUTHORIZATION,\n hdrs.TE,\n hdrs.TRAILER,\n hdrs.TRANSFER_ENCODING,\n hdrs.UPGRADE,\n }\n\n def __init__(self, writer, message):\n self.writer = writer\n self.message = message\n\n def start_response(self, status, headers, exc_info=None):\n if exc_info:\n try:\n if self.status:\n raise exc_info[1]\n finally:\n exc_info = None\n\n status_code = int(status.split(' ', 1)[0])\n\n self.status = status\n resp = self.response = aiohttp.Response(\n self.writer, status_code,\n self.message.version, self.message.should_close)\n resp.HOP_HEADERS = self.HOP_HEADERS\n resp.add_headers(*headers)\n\n if resp.has_chunked_hdr:\n resp.enable_chunked_encoding()\n\n # send headers immediately for websocket connection\n if status_code == 101 and resp.upgrade and resp.websocket:\n resp.send_headers()\n else:\n resp._send_headers = True\n return self.response.write\n", "path": "aiohttp/wsgi.py"}], "after_files": [{"content": "\"\"\"wsgi server.\n\nTODO:\n * proxy protocol\n * x-forward security\n * wsgi file support (os.sendfile)\n\"\"\"\n\nimport asyncio\nimport inspect\nimport io\nimport os\nimport sys\nfrom urllib.parse import urlsplit\n\nimport aiohttp\nfrom aiohttp import server, hdrs\n\n__all__ = ('WSGIServerHttpProtocol',)\n\n\nclass WSGIServerHttpProtocol(server.ServerHttpProtocol):\n \"\"\"HTTP Server that implements the Python WSGI protocol.\n\n It uses 'wsgi.async' of 'True'. 'wsgi.input' can behave differently\n depends on 'readpayload' constructor parameter. 
If readpayload is set to\n True, wsgi server reads all incoming data into BytesIO object and\n sends it as 'wsgi.input' environ var. If readpayload is set to false\n 'wsgi.input' is a StreamReader and application should read incoming\n data with \"yield from environ['wsgi.input'].read()\". It defaults to False.\n \"\"\"\n\n SCRIPT_NAME = os.environ.get('SCRIPT_NAME', '')\n\n def __init__(self, app, readpayload=False, is_ssl=False, *args, **kw):\n super().__init__(*args, **kw)\n\n self.wsgi = app\n self.is_ssl = is_ssl\n self.readpayload = readpayload\n\n def create_wsgi_response(self, message):\n return WsgiResponse(self.writer, message)\n\n def create_wsgi_environ(self, message, payload):\n uri_parts = urlsplit(message.path)\n url_scheme = 'https' if self.is_ssl else 'http'\n\n environ = {\n 'wsgi.input': payload,\n 'wsgi.errors': sys.stderr,\n 'wsgi.version': (1, 0),\n 'wsgi.async': True,\n 'wsgi.multithread': False,\n 'wsgi.multiprocess': False,\n 'wsgi.run_once': False,\n 'wsgi.file_wrapper': FileWrapper,\n 'wsgi.url_scheme': url_scheme,\n 'SERVER_SOFTWARE': aiohttp.HttpMessage.SERVER_SOFTWARE,\n 'REQUEST_METHOD': message.method,\n 'QUERY_STRING': uri_parts.query or '',\n 'RAW_URI': message.path,\n 'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version\n }\n\n script_name = self.SCRIPT_NAME\n\n for hdr_name, hdr_value in message.headers.items():\n if hdr_name == 'SCRIPT_NAME':\n script_name = hdr_value\n elif hdr_name == 'CONTENT-TYPE':\n environ['CONTENT_TYPE'] = hdr_value\n continue\n elif hdr_name == 'CONTENT-LENGTH':\n environ['CONTENT_LENGTH'] = hdr_value\n continue\n\n key = 'HTTP_%s' % hdr_name.replace('-', '_')\n if key in environ:\n hdr_value = '%s,%s' % (environ[key], hdr_value)\n\n environ[key] = hdr_value\n\n # authors should be aware that REMOTE_HOST and REMOTE_ADDR\n # may not qualify the remote addr\n # also SERVER_PORT variable MUST be set to the TCP/IP port number on\n # which this request is received from the client.\n # http://www.ietf.org/rfc/rfc3875\n\n remote = self.transport.get_extra_info('peername')\n environ['REMOTE_ADDR'] = remote[0]\n environ['REMOTE_PORT'] = remote[1]\n\n sockname = self.transport.get_extra_info('sockname')\n environ['SERVER_PORT'] = str(sockname[1])\n host = message.headers.get(\"HOST\", None)\n if host:\n environ['SERVER_NAME'] = host.split(\":\")[0]\n else:\n environ['SERVER_NAME'] = sockname[0]\n\n path_info = uri_parts.path\n if script_name:\n path_info = path_info.split(script_name, 1)[-1]\n\n environ['PATH_INFO'] = path_info\n environ['SCRIPT_NAME'] = script_name\n\n environ['async.reader'] = self.reader\n environ['async.writer'] = self.writer\n\n return environ\n\n @asyncio.coroutine\n def handle_request(self, message, payload):\n \"\"\"Handle a single HTTP request\"\"\"\n now = self._loop.time()\n\n if self.readpayload:\n wsgiinput = io.BytesIO()\n wsgiinput.write((yield from payload.read()))\n wsgiinput.seek(0)\n payload = wsgiinput\n\n environ = self.create_wsgi_environ(message, payload)\n response = self.create_wsgi_response(message)\n\n riter = self.wsgi(environ, response.start_response)\n if isinstance(riter, asyncio.Future) or inspect.isgenerator(riter):\n riter = yield from riter\n\n resp = response.response\n try:\n for item in riter:\n if isinstance(item, asyncio.Future):\n item = yield from item\n yield from resp.write(item)\n\n yield from resp.write_eof()\n finally:\n if hasattr(riter, 'close'):\n riter.close()\n\n if resp.keep_alive():\n self.keep_alive(True)\n\n self.log_access(\n message, environ, response.response, 
self._loop.time() - now)\n\n\nclass FileWrapper:\n \"\"\"Custom file wrapper.\"\"\"\n\n def __init__(self, fobj, chunk_size=8192):\n self.fobj = fobj\n self.chunk_size = chunk_size\n if hasattr(fobj, 'close'):\n self.close = fobj.close\n\n def __iter__(self):\n return self\n\n def __next__(self):\n data = self.fobj.read(self.chunk_size)\n if data:\n return data\n raise StopIteration\n\n\nclass WsgiResponse:\n \"\"\"Implementation of start_response() callable as specified by PEP 3333\"\"\"\n\n status = None\n\n HOP_HEADERS = {\n hdrs.CONNECTION,\n hdrs.KEEP_ALIVE,\n hdrs.PROXY_AUTHENTICATE,\n hdrs.PROXY_AUTHORIZATION,\n hdrs.TE,\n hdrs.TRAILER,\n hdrs.TRANSFER_ENCODING,\n hdrs.UPGRADE,\n }\n\n def __init__(self, writer, message):\n self.writer = writer\n self.message = message\n\n def start_response(self, status, headers, exc_info=None):\n if exc_info:\n try:\n if self.status:\n raise exc_info[1]\n finally:\n exc_info = None\n\n status_code = int(status.split(' ', 1)[0])\n\n self.status = status\n resp = self.response = aiohttp.Response(\n self.writer, status_code,\n self.message.version, self.message.should_close)\n resp.HOP_HEADERS = self.HOP_HEADERS\n resp.add_headers(*headers)\n\n if resp.has_chunked_hdr:\n resp.enable_chunked_encoding()\n\n # send headers immediately for websocket connection\n if status_code == 101 and resp.upgrade and resp.websocket:\n resp.send_headers()\n else:\n resp._send_headers = True\n return self.response.write\n", "path": "aiohttp/wsgi.py"}]}
| 2,424 | 130 |
gh_patches_debug_5923
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-488
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Featured careeropprotunities are not featured
The featured opportunities are not prioritized over other opportunities.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/careeropportunity/views.py`
Content:
```
1 #-*- coding: utf-8 -*-
2 from django.utils import timezone
3
4 from datetime import datetime
5
6 from django.conf import settings
7 from django.shortcuts import render_to_response
8 from django.shortcuts import get_object_or_404
9 from django.template import RequestContext
10
11 from apps.careeropportunity.models import CareerOpportunity
12
13
14 def index(request):
15 opportunities = CareerOpportunity.objects.filter(
16 start__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')
17
18 return render_to_response('careeropportunity/index.html', \
19 {'opportunities': opportunities}, \
20 context_instance=RequestContext(request))
21
22
23 def details(request, opportunity_id):
24 opportunity = get_object_or_404(CareerOpportunity, pk=opportunity_id)
25
26 return render_to_response('careeropportunity/details.html', \
27 {'opportunity': opportunity}, \
28 context_instance=RequestContext(request))
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/careeropportunity/views.py b/apps/careeropportunity/views.py
--- a/apps/careeropportunity/views.py
+++ b/apps/careeropportunity/views.py
@@ -13,7 +13,7 @@
def index(request):
opportunities = CareerOpportunity.objects.filter(
- start__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')
+ start__lte=timezone.now(), end__gte=timezone.now()).order_by('-featured', '-start')
return render_to_response('careeropportunity/index.html', \
{'opportunities': opportunities}, \
|
{"golden_diff": "diff --git a/apps/careeropportunity/views.py b/apps/careeropportunity/views.py\n--- a/apps/careeropportunity/views.py\n+++ b/apps/careeropportunity/views.py\n@@ -13,7 +13,7 @@\n \n def index(request):\n opportunities = CareerOpportunity.objects.filter(\n- \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')\n+ \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('-featured', '-start')\n \n return render_to_response('careeropportunity/index.html', \\\n {'opportunities': opportunities}, \\\n", "issue": "Featured careeropprotunities are not featured\nThe featured opportunities are not prioritized over other opportunities. \n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom django.utils import timezone\n\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import get_object_or_404\nfrom django.template import RequestContext\n\nfrom apps.careeropportunity.models import CareerOpportunity\n\n\ndef index(request):\n opportunities = CareerOpportunity.objects.filter(\n \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')\n \n return render_to_response('careeropportunity/index.html', \\\n {'opportunities': opportunities}, \\\n context_instance=RequestContext(request))\n\n\ndef details(request, opportunity_id):\n opportunity = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n\n return render_to_response('careeropportunity/details.html', \\\n {'opportunity': opportunity}, \\\n context_instance=RequestContext(request))\n", "path": "apps/careeropportunity/views.py"}], "after_files": [{"content": "#-*- coding: utf-8 -*-\nfrom django.utils import timezone\n\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import get_object_or_404\nfrom django.template import RequestContext\n\nfrom apps.careeropportunity.models import CareerOpportunity\n\n\ndef index(request):\n opportunities = CareerOpportunity.objects.filter(\n \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('-featured', '-start')\n \n return render_to_response('careeropportunity/index.html', \\\n {'opportunities': opportunities}, \\\n context_instance=RequestContext(request))\n\n\ndef details(request, opportunity_id):\n opportunity = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n\n return render_to_response('careeropportunity/details.html', \\\n {'opportunity': opportunity}, \\\n context_instance=RequestContext(request))\n", "path": "apps/careeropportunity/views.py"}]}
| 528 | 140 |
gh_patches_debug_36979
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-1389
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rest plugins should know which page is being compiled
I want to access some of the metadata of the current page from a rest plugin and I can't find a way to get the current page in self.state.
I found out that by adding a reference to the "source" path when calling the rest compiler, I can retrieve it as self.state.document.settings._source, then find a matching page. Is it a good solution? Could this (or something similar) be integrated in the default rest compiler?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/rest/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import unicode_literals
28 import io
29 import os
30 import re
31
32 try:
33 import docutils.core
34 import docutils.nodes
35 import docutils.utils
36 import docutils.io
37 import docutils.readers.standalone
38 import docutils.writers.html4css1
39 has_docutils = True
40 except ImportError:
41 has_docutils = False
42
43 from nikola.plugin_categories import PageCompiler
44 from nikola.utils import get_logger, makedirs, req_missing, write_metadata
45
46
47 class CompileRest(PageCompiler):
48 """Compile reSt into HTML."""
49
50 name = "rest"
51 demote_headers = True
52 logger = None
53
54 def compile_html(self, source, dest, is_two_file=True):
55 """Compile reSt into HTML."""
56
57 if not has_docutils:
58 req_missing(['docutils'], 'build this site (compile reStructuredText)')
59 makedirs(os.path.dirname(dest))
60 error_level = 100
61 with io.open(dest, "w+", encoding="utf8") as out_file:
62 with io.open(source, "r", encoding="utf8") as in_file:
63 data = in_file.read()
64 add_ln = 0
65 if not is_two_file:
66 spl = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)
67 data = spl[-1]
68 if len(spl) != 1:
69 # If errors occur, this will be added to the line
70 # number reported by docutils so the line number
71 # matches the actual line number (off by 7 with default
72 # metadata, could be more or less depending on the post
73 # author).
74 add_ln = len(spl[0].splitlines()) + 1
75
76 default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')
77 output, error_level, deps = rst2html(
78 data, settings_overrides={
79 'initial_header_level': 1,
80 'record_dependencies': True,
81 'stylesheet_path': None,
82 'link_stylesheet': True,
83 'syntax_highlight': 'short',
84 'math_output': 'mathjax',
85 'template': default_template_path,
86 }, logger=self.logger, l_source=source, l_add_ln=add_ln)
87 out_file.write(output)
88 deps_path = dest + '.dep'
89 if deps.list:
90 with io.open(deps_path, "w+", encoding="utf8") as deps_file:
91 deps_file.write('\n'.join(deps.list))
92 else:
93 if os.path.isfile(deps_path):
94 os.unlink(deps_path)
95 if error_level < 3:
96 return True
97 else:
98 return False
99
100 def create_post(self, path, **kw):
101 content = kw.pop('content', None)
102 onefile = kw.pop('onefile', False)
103 # is_page is not used by create_post as of now.
104 kw.pop('is_page', False)
105 metadata = {}
106 metadata.update(self.default_metadata)
107 metadata.update(kw)
108 makedirs(os.path.dirname(path))
109 if not content.endswith('\n'):
110 content += '\n'
111 with io.open(path, "w+", encoding="utf8") as fd:
112 if onefile:
113 fd.write(write_metadata(metadata))
114 fd.write('\n' + content)
115
116 def set_site(self, site):
117 for plugin_info in site.plugin_manager.getPluginsOfCategory("RestExtension"):
118 if plugin_info.name in site.config['DISABLED_PLUGINS']:
119 site.plugin_manager.removePluginFromCategory(plugin_info, "RestExtension")
120 continue
121
122 site.plugin_manager.activatePluginByName(plugin_info.name)
123 plugin_info.plugin_object.set_site(site)
124 plugin_info.plugin_object.short_help = plugin_info.description
125
126 self.logger = get_logger('compile_rest', site.loghandlers)
127 if not site.debug:
128 self.logger.level = 4
129
130 return super(CompileRest, self).set_site(site)
131
132
133 def get_observer(settings):
134 """Return an observer for the docutils Reporter."""
135 def observer(msg):
136 """Report docutils/rest messages to a Nikola user.
137
138 Error code mapping:
139
140 +------+---------+------+----------+
141 | dNUM | dNAME | lNUM | lNAME | d = docutils, l = logbook
142 +------+---------+------+----------+
143 | 0 | DEBUG | 1 | DEBUG |
144 | 1 | INFO | 2 | INFO |
145 | 2 | WARNING | 4 | WARNING |
146 | 3 | ERROR | 5 | ERROR |
147 | 4 | SEVERE | 6 | CRITICAL |
148 +------+---------+------+----------+
149 """
150 errormap = {0: 1, 1: 2, 2: 4, 3: 5, 4: 6}
151 text = docutils.nodes.Element.astext(msg)
152 line = msg['line'] + settings['add_ln'] if 'line' in msg else 0
153 out = '[{source}{colon}{line}] {text}'.format(
154 source=settings['source'], colon=(':' if line else ''),
155 line=line, text=text)
156 settings['logger'].log(errormap[msg['level']], out)
157
158 return observer
159
160
161 class NikolaReader(docutils.readers.standalone.Reader):
162
163 def new_document(self):
164 """Create and return a new empty document tree (root node)."""
165 document = docutils.utils.new_document(self.source.source_path, self.settings)
166 document.reporter.stream = False
167 document.reporter.attach_observer(get_observer(self.l_settings))
168 return document
169
170
171 def add_node(node, visit_function=None, depart_function=None):
172 """
173 Register a Docutils node class.
174 This function is completely optional. It is a same concept as
175 `Sphinx add_node function <http://sphinx-doc.org/ext/appapi.html#sphinx.application.Sphinx.add_node>`_.
176
177 For example::
178
179 class Plugin(RestExtension):
180
181 name = "rest_math"
182
183 def set_site(self, site):
184 self.site = site
185 directives.register_directive('math', MathDirective)
186 add_node(MathBlock, visit_Math, depart_Math)
187 return super(Plugin, self).set_site(site)
188
189 class MathDirective(Directive):
190 def run(self):
191 node = MathBlock()
192 return [node]
193
194 class Math(docutils.nodes.Element): pass
195
196 def visit_Math(self, node):
197 self.body.append(self.starttag(node, 'math'))
198
199 def depart_Math(self, node):
200 self.body.append('</math>')
201
202 For full example, you can refer to `Microdata plugin <http://plugins.getnikola.com/#microdata>`_
203 """
204 docutils.nodes._add_node_class_names([node.__name__])
205 if visit_function:
206 setattr(docutils.writers.html4css1.HTMLTranslator, 'visit_' + node.__name__, visit_function)
207 if depart_function:
208 setattr(docutils.writers.html4css1.HTMLTranslator, 'depart_' + node.__name__, depart_function)
209
210
211 def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
212 destination_path=None, reader=None,
213 parser=None, parser_name='restructuredtext', writer=None,
214 writer_name='html', settings=None, settings_spec=None,
215 settings_overrides=None, config_section=None,
216 enable_exit_status=None, logger=None, l_source='', l_add_ln=0):
217 """
218 Set up & run a `Publisher`, and return a dictionary of document parts.
219 Dictionary keys are the names of parts, and values are Unicode strings;
220 encoding is up to the client. For programmatic use with string I/O.
221
222 For encoded string input, be sure to set the 'input_encoding' setting to
223 the desired encoding. Set it to 'unicode' for unencoded Unicode string
224 input. Here's how::
225
226 publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
227
228 Parameters: see `publish_programmatically`.
229
230 WARNING: `reader` should be None (or NikolaReader()) if you want Nikola to report
231 reStructuredText syntax errors.
232 """
233 if reader is None:
234 reader = NikolaReader()
235 # For our custom logging, we have special needs and special settings we
236 # specify here.
237 # logger a logger from Nikola
238 # source source filename (docutils gets a string)
239 # add_ln amount of metadata lines (see comment in compile_html above)
240 reader.l_settings = {'logger': logger, 'source': l_source,
241 'add_ln': l_add_ln}
242
243 pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
244 source_class=source_class,
245 destination_class=docutils.io.StringOutput)
246 pub.set_components(None, parser_name, writer_name)
247 pub.process_programmatic_settings(
248 settings_spec, settings_overrides, config_section)
249 pub.set_source(source, source_path)
250 pub.set_destination(None, destination_path)
251 pub.publish(enable_exit_status=enable_exit_status)
252
253 return pub.writer.parts['docinfo'] + pub.writer.parts['fragment'], pub.document.reporter.max_level, pub.settings.record_dependencies
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/compile/rest/__init__.py b/nikola/plugins/compile/rest/__init__.py
--- a/nikola/plugins/compile/rest/__init__.py
+++ b/nikola/plugins/compile/rest/__init__.py
@@ -83,7 +83,7 @@
'syntax_highlight': 'short',
'math_output': 'mathjax',
'template': default_template_path,
- }, logger=self.logger, l_source=source, l_add_ln=add_ln)
+ }, logger=self.logger, source_path=source, l_add_ln=add_ln)
out_file.write(output)
deps_path = dest + '.dep'
if deps.list:
@@ -213,7 +213,7 @@
parser=None, parser_name='restructuredtext', writer=None,
writer_name='html', settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
- enable_exit_status=None, logger=None, l_source='', l_add_ln=0):
+ enable_exit_status=None, logger=None, l_add_ln=0):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
@@ -237,7 +237,7 @@
# logger a logger from Nikola
# source source filename (docutils gets a string)
# add_ln amount of metadata lines (see comment in compile_html above)
- reader.l_settings = {'logger': logger, 'source': l_source,
+ reader.l_settings = {'logger': logger, 'source': source_path,
'add_ln': l_add_ln}
pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
@@ -246,7 +246,8 @@
pub.set_components(None, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
- pub.set_source(source, source_path)
+ pub.set_source(source, None)
+ pub.settings._nikola_source_path = source_path
pub.set_destination(None, destination_path)
pub.publish(enable_exit_status=enable_exit_status)
|
{"golden_diff": "diff --git a/nikola/plugins/compile/rest/__init__.py b/nikola/plugins/compile/rest/__init__.py\n--- a/nikola/plugins/compile/rest/__init__.py\n+++ b/nikola/plugins/compile/rest/__init__.py\n@@ -83,7 +83,7 @@\n 'syntax_highlight': 'short',\n 'math_output': 'mathjax',\n 'template': default_template_path,\n- }, logger=self.logger, l_source=source, l_add_ln=add_ln)\n+ }, logger=self.logger, source_path=source, l_add_ln=add_ln)\n out_file.write(output)\n deps_path = dest + '.dep'\n if deps.list:\n@@ -213,7 +213,7 @@\n parser=None, parser_name='restructuredtext', writer=None,\n writer_name='html', settings=None, settings_spec=None,\n settings_overrides=None, config_section=None,\n- enable_exit_status=None, logger=None, l_source='', l_add_ln=0):\n+ enable_exit_status=None, logger=None, l_add_ln=0):\n \"\"\"\n Set up & run a `Publisher`, and return a dictionary of document parts.\n Dictionary keys are the names of parts, and values are Unicode strings;\n@@ -237,7 +237,7 @@\n # logger a logger from Nikola\n # source source filename (docutils gets a string)\n # add_ln amount of metadata lines (see comment in compile_html above)\n- reader.l_settings = {'logger': logger, 'source': l_source,\n+ reader.l_settings = {'logger': logger, 'source': source_path,\n 'add_ln': l_add_ln}\n \n pub = docutils.core.Publisher(reader, parser, writer, settings=settings,\n@@ -246,7 +246,8 @@\n pub.set_components(None, parser_name, writer_name)\n pub.process_programmatic_settings(\n settings_spec, settings_overrides, config_section)\n- pub.set_source(source, source_path)\n+ pub.set_source(source, None)\n+ pub.settings._nikola_source_path = source_path\n pub.set_destination(None, destination_path)\n pub.publish(enable_exit_status=enable_exit_status)\n", "issue": "rest plugins should know which page is being compiled\nI want to access some of the metadata of the current page from a rest plugin and I can't find a way to get the current page in self.state.\n\nI found out that by adding a reference to the \"source\" path when calling the rest compiler, I can retrieve it as self.state.document.settings._source, then find a matching page. Is it a good solution? Could this (or something similar) be integrated in the default rest compiler?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals\nimport io\nimport os\nimport re\n\ntry:\n import docutils.core\n import docutils.nodes\n import docutils.utils\n import docutils.io\n import docutils.readers.standalone\n import docutils.writers.html4css1\n has_docutils = True\nexcept ImportError:\n has_docutils = False\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import get_logger, makedirs, req_missing, write_metadata\n\n\nclass CompileRest(PageCompiler):\n \"\"\"Compile reSt into HTML.\"\"\"\n\n name = \"rest\"\n demote_headers = True\n logger = None\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile reSt into HTML.\"\"\"\n\n if not has_docutils:\n req_missing(['docutils'], 'build this site (compile reStructuredText)')\n makedirs(os.path.dirname(dest))\n error_level = 100\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n add_ln = 0\n if not is_two_file:\n spl = re.split('(\\n\\n|\\r\\n\\r\\n)', data, maxsplit=1)\n data = spl[-1]\n if len(spl) != 1:\n # If errors occur, this will be added to the line\n # number reported by docutils so the line number\n # matches the actual line number (off by 7 with default\n # metadata, could be more or less depending on the post\n # author).\n add_ln = len(spl[0].splitlines()) + 1\n\n default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')\n output, error_level, deps = rst2html(\n data, settings_overrides={\n 'initial_header_level': 1,\n 'record_dependencies': True,\n 'stylesheet_path': None,\n 'link_stylesheet': True,\n 'syntax_highlight': 'short',\n 'math_output': 'mathjax',\n 'template': default_template_path,\n }, logger=self.logger, l_source=source, l_add_ln=add_ln)\n out_file.write(output)\n deps_path = dest + '.dep'\n if deps.list:\n with io.open(deps_path, \"w+\", encoding=\"utf8\") as deps_file:\n deps_file.write('\\n'.join(deps.list))\n else:\n if os.path.isfile(deps_path):\n os.unlink(deps_path)\n if error_level < 3:\n return True\n else:\n return False\n\n def create_post(self, path, **kw):\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write(write_metadata(metadata))\n fd.write('\\n' + content)\n\n def set_site(self, site):\n for plugin_info in site.plugin_manager.getPluginsOfCategory(\"RestExtension\"):\n if plugin_info.name in site.config['DISABLED_PLUGINS']:\n site.plugin_manager.removePluginFromCategory(plugin_info, \"RestExtension\")\n continue\n\n site.plugin_manager.activatePluginByName(plugin_info.name)\n plugin_info.plugin_object.set_site(site)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.logger = get_logger('compile_rest', site.loghandlers)\n if not site.debug:\n self.logger.level = 4\n\n return super(CompileRest, self).set_site(site)\n\n\ndef get_observer(settings):\n \"\"\"Return an observer for the docutils Reporter.\"\"\"\n def 
observer(msg):\n \"\"\"Report docutils/rest messages to a Nikola user.\n\n Error code mapping:\n\n +------+---------+------+----------+\n | dNUM | dNAME | lNUM | lNAME | d = docutils, l = logbook\n +------+---------+------+----------+\n | 0 | DEBUG | 1 | DEBUG |\n | 1 | INFO | 2 | INFO |\n | 2 | WARNING | 4 | WARNING |\n | 3 | ERROR | 5 | ERROR |\n | 4 | SEVERE | 6 | CRITICAL |\n +------+---------+------+----------+\n \"\"\"\n errormap = {0: 1, 1: 2, 2: 4, 3: 5, 4: 6}\n text = docutils.nodes.Element.astext(msg)\n line = msg['line'] + settings['add_ln'] if 'line' in msg else 0\n out = '[{source}{colon}{line}] {text}'.format(\n source=settings['source'], colon=(':' if line else ''),\n line=line, text=text)\n settings['logger'].log(errormap[msg['level']], out)\n\n return observer\n\n\nclass NikolaReader(docutils.readers.standalone.Reader):\n\n def new_document(self):\n \"\"\"Create and return a new empty document tree (root node).\"\"\"\n document = docutils.utils.new_document(self.source.source_path, self.settings)\n document.reporter.stream = False\n document.reporter.attach_observer(get_observer(self.l_settings))\n return document\n\n\ndef add_node(node, visit_function=None, depart_function=None):\n \"\"\"\n Register a Docutils node class.\n This function is completely optional. It is a same concept as\n `Sphinx add_node function <http://sphinx-doc.org/ext/appapi.html#sphinx.application.Sphinx.add_node>`_.\n\n For example::\n\n class Plugin(RestExtension):\n\n name = \"rest_math\"\n\n def set_site(self, site):\n self.site = site\n directives.register_directive('math', MathDirective)\n add_node(MathBlock, visit_Math, depart_Math)\n return super(Plugin, self).set_site(site)\n\n class MathDirective(Directive):\n def run(self):\n node = MathBlock()\n return [node]\n\n class Math(docutils.nodes.Element): pass\n\n def visit_Math(self, node):\n self.body.append(self.starttag(node, 'math'))\n\n def depart_Math(self, node):\n self.body.append('</math>')\n\n For full example, you can refer to `Microdata plugin <http://plugins.getnikola.com/#microdata>`_\n \"\"\"\n docutils.nodes._add_node_class_names([node.__name__])\n if visit_function:\n setattr(docutils.writers.html4css1.HTMLTranslator, 'visit_' + node.__name__, visit_function)\n if depart_function:\n setattr(docutils.writers.html4css1.HTMLTranslator, 'depart_' + node.__name__, depart_function)\n\n\ndef rst2html(source, source_path=None, source_class=docutils.io.StringInput,\n destination_path=None, reader=None,\n parser=None, parser_name='restructuredtext', writer=None,\n writer_name='html', settings=None, settings_spec=None,\n settings_overrides=None, config_section=None,\n enable_exit_status=None, logger=None, l_source='', l_add_ln=0):\n \"\"\"\n Set up & run a `Publisher`, and return a dictionary of document parts.\n Dictionary keys are the names of parts, and values are Unicode strings;\n encoding is up to the client. For programmatic use with string I/O.\n\n For encoded string input, be sure to set the 'input_encoding' setting to\n the desired encoding. Set it to 'unicode' for unencoded Unicode string\n input. 
Here's how::\n\n publish_parts(..., settings_overrides={'input_encoding': 'unicode'})\n\n Parameters: see `publish_programmatically`.\n\n WARNING: `reader` should be None (or NikolaReader()) if you want Nikola to report\n reStructuredText syntax errors.\n \"\"\"\n if reader is None:\n reader = NikolaReader()\n # For our custom logging, we have special needs and special settings we\n # specify here.\n # logger a logger from Nikola\n # source source filename (docutils gets a string)\n # add_ln amount of metadata lines (see comment in compile_html above)\n reader.l_settings = {'logger': logger, 'source': l_source,\n 'add_ln': l_add_ln}\n\n pub = docutils.core.Publisher(reader, parser, writer, settings=settings,\n source_class=source_class,\n destination_class=docutils.io.StringOutput)\n pub.set_components(None, parser_name, writer_name)\n pub.process_programmatic_settings(\n settings_spec, settings_overrides, config_section)\n pub.set_source(source, source_path)\n pub.set_destination(None, destination_path)\n pub.publish(enable_exit_status=enable_exit_status)\n\n return pub.writer.parts['docinfo'] + pub.writer.parts['fragment'], pub.document.reporter.max_level, pub.settings.record_dependencies\n", "path": "nikola/plugins/compile/rest/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals\nimport io\nimport os\nimport re\n\ntry:\n import docutils.core\n import docutils.nodes\n import docutils.utils\n import docutils.io\n import docutils.readers.standalone\n import docutils.writers.html4css1\n has_docutils = True\nexcept ImportError:\n has_docutils = False\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import get_logger, makedirs, req_missing, write_metadata\n\n\nclass CompileRest(PageCompiler):\n \"\"\"Compile reSt into HTML.\"\"\"\n\n name = \"rest\"\n demote_headers = True\n logger = None\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile reSt into HTML.\"\"\"\n\n if not has_docutils:\n req_missing(['docutils'], 'build this site (compile reStructuredText)')\n makedirs(os.path.dirname(dest))\n error_level = 100\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n add_ln = 0\n if not is_two_file:\n spl = re.split('(\\n\\n|\\r\\n\\r\\n)', data, maxsplit=1)\n data = spl[-1]\n if len(spl) != 1:\n # If errors occur, this will be added to the line\n # number reported by docutils so the line number\n # matches the actual line number (off by 7 with default\n # metadata, could be more or less depending on the post\n # author).\n add_ln = len(spl[0].splitlines()) + 1\n\n default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')\n output, error_level, deps = rst2html(\n data, settings_overrides={\n 'initial_header_level': 1,\n 'record_dependencies': True,\n 'stylesheet_path': None,\n 'link_stylesheet': True,\n 'syntax_highlight': 'short',\n 'math_output': 'mathjax',\n 'template': default_template_path,\n }, logger=self.logger, source_path=source, l_add_ln=add_ln)\n out_file.write(output)\n deps_path = dest + '.dep'\n if deps.list:\n with io.open(deps_path, \"w+\", encoding=\"utf8\") as deps_file:\n deps_file.write('\\n'.join(deps.list))\n else:\n if os.path.isfile(deps_path):\n os.unlink(deps_path)\n if error_level < 3:\n return True\n else:\n return False\n\n def create_post(self, path, **kw):\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write(write_metadata(metadata))\n fd.write('\\n' + content)\n\n def set_site(self, site):\n for plugin_info in site.plugin_manager.getPluginsOfCategory(\"RestExtension\"):\n if plugin_info.name in site.config['DISABLED_PLUGINS']:\n site.plugin_manager.removePluginFromCategory(plugin_info, \"RestExtension\")\n continue\n\n site.plugin_manager.activatePluginByName(plugin_info.name)\n plugin_info.plugin_object.set_site(site)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.logger = get_logger('compile_rest', site.loghandlers)\n if not site.debug:\n self.logger.level = 4\n\n return super(CompileRest, self).set_site(site)\n\n\ndef get_observer(settings):\n \"\"\"Return an observer for the docutils Reporter.\"\"\"\n def 
observer(msg):\n \"\"\"Report docutils/rest messages to a Nikola user.\n\n Error code mapping:\n\n +------+---------+------+----------+\n | dNUM | dNAME | lNUM | lNAME | d = docutils, l = logbook\n +------+---------+------+----------+\n | 0 | DEBUG | 1 | DEBUG |\n | 1 | INFO | 2 | INFO |\n | 2 | WARNING | 4 | WARNING |\n | 3 | ERROR | 5 | ERROR |\n | 4 | SEVERE | 6 | CRITICAL |\n +------+---------+------+----------+\n \"\"\"\n errormap = {0: 1, 1: 2, 2: 4, 3: 5, 4: 6}\n text = docutils.nodes.Element.astext(msg)\n line = msg['line'] + settings['add_ln'] if 'line' in msg else 0\n out = '[{source}{colon}{line}] {text}'.format(\n source=settings['source'], colon=(':' if line else ''),\n line=line, text=text)\n settings['logger'].log(errormap[msg['level']], out)\n\n return observer\n\n\nclass NikolaReader(docutils.readers.standalone.Reader):\n\n def new_document(self):\n \"\"\"Create and return a new empty document tree (root node).\"\"\"\n document = docutils.utils.new_document(self.source.source_path, self.settings)\n document.reporter.stream = False\n document.reporter.attach_observer(get_observer(self.l_settings))\n return document\n\n\ndef add_node(node, visit_function=None, depart_function=None):\n \"\"\"\n Register a Docutils node class.\n This function is completely optional. It is a same concept as\n `Sphinx add_node function <http://sphinx-doc.org/ext/appapi.html#sphinx.application.Sphinx.add_node>`_.\n\n For example::\n\n class Plugin(RestExtension):\n\n name = \"rest_math\"\n\n def set_site(self, site):\n self.site = site\n directives.register_directive('math', MathDirective)\n add_node(MathBlock, visit_Math, depart_Math)\n return super(Plugin, self).set_site(site)\n\n class MathDirective(Directive):\n def run(self):\n node = MathBlock()\n return [node]\n\n class Math(docutils.nodes.Element): pass\n\n def visit_Math(self, node):\n self.body.append(self.starttag(node, 'math'))\n\n def depart_Math(self, node):\n self.body.append('</math>')\n\n For full example, you can refer to `Microdata plugin <http://plugins.getnikola.com/#microdata>`_\n \"\"\"\n docutils.nodes._add_node_class_names([node.__name__])\n if visit_function:\n setattr(docutils.writers.html4css1.HTMLTranslator, 'visit_' + node.__name__, visit_function)\n if depart_function:\n setattr(docutils.writers.html4css1.HTMLTranslator, 'depart_' + node.__name__, depart_function)\n\n\ndef rst2html(source, source_path=None, source_class=docutils.io.StringInput,\n destination_path=None, reader=None,\n parser=None, parser_name='restructuredtext', writer=None,\n writer_name='html', settings=None, settings_spec=None,\n settings_overrides=None, config_section=None,\n enable_exit_status=None, logger=None, l_add_ln=0):\n \"\"\"\n Set up & run a `Publisher`, and return a dictionary of document parts.\n Dictionary keys are the names of parts, and values are Unicode strings;\n encoding is up to the client. For programmatic use with string I/O.\n\n For encoded string input, be sure to set the 'input_encoding' setting to\n the desired encoding. Set it to 'unicode' for unencoded Unicode string\n input. 
Here's how::\n\n publish_parts(..., settings_overrides={'input_encoding': 'unicode'})\n\n Parameters: see `publish_programmatically`.\n\n WARNING: `reader` should be None (or NikolaReader()) if you want Nikola to report\n reStructuredText syntax errors.\n \"\"\"\n if reader is None:\n reader = NikolaReader()\n # For our custom logging, we have special needs and special settings we\n # specify here.\n # logger a logger from Nikola\n # source source filename (docutils gets a string)\n # add_ln amount of metadata lines (see comment in compile_html above)\n reader.l_settings = {'logger': logger, 'source': source_path,\n 'add_ln': l_add_ln}\n\n pub = docutils.core.Publisher(reader, parser, writer, settings=settings,\n source_class=source_class,\n destination_class=docutils.io.StringOutput)\n pub.set_components(None, parser_name, writer_name)\n pub.process_programmatic_settings(\n settings_spec, settings_overrides, config_section)\n pub.set_source(source, None)\n pub.settings._nikola_source_path = source_path\n pub.set_destination(None, destination_path)\n pub.publish(enable_exit_status=enable_exit_status)\n\n return pub.writer.parts['docinfo'] + pub.writer.parts['fragment'], pub.document.reporter.max_level, pub.settings.record_dependencies\n", "path": "nikola/plugins/compile/rest/__init__.py"}]}
| 3,278 | 479 |
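For readers skimming the Nikola/docutils row that ends above: the error-code table in the observer docstring reduces to a fixed lookup. A minimal standalone sketch (the helper name is illustrative, not part of Nikola's API):

```python
# Hypothetical helper; the dict values come from the errormap in the observer
# closure quoted above (docutils levels 0-4 -> logbook levels 1, 2, 4, 5, 6).
DOCUTILS_TO_LOGBOOK = {0: 1, 1: 2, 2: 4, 3: 5, 4: 6}

def map_report_level(docutils_level: int) -> int:
    """Translate a docutils reporter level into the logbook level used for logging."""
    return DOCUTILS_TO_LOGBOOK[docutils_level]

assert map_report_level(2) == 4  # docutils WARNING -> logbook WARNING
```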
gh_patches_debug_16266 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1149 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] [CV] typo in context prediction validation
**Describe the bug**
it says batch_to_images instead of infer_on_batch

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/vision/context.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module for base vision context."""
12 import logging
13 from typing import Mapping, Union
14
15 import torch
16 from torch import nn
17 from ignite.metrics import Metric
18
19 from deepchecks.core import DatasetKind
20 from deepchecks.vision.vision_data import VisionData, TaskType
21 from deepchecks.core.errors import (
22 DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,
23 DeepchecksNotSupportedError, DeepchecksValueError, ValidationError
24 )
25
26
27 __all__ = ['Context']
28
29
30 logger = logging.getLogger('deepchecks')
31
32
33 class Context:
34 """Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.
35
36 Parameters
37 ----------
38 train : VisionData , default: None
39 Dataset or DataFrame object, representing data an estimator was fitted on
40 test : VisionData , default: None
41 Dataset or DataFrame object, representing data an estimator predicts on
42 model : BasicModel , default: None
43 A scikit-learn-compatible fitted estimator instance
44 model_name: str , default: ''
45 The name of the model
46 scorers : Mapping[str, Metric] , default: None
47 dict of scorers names to a Metric
48 scorers_per_class : Mapping[str, Metric] , default: None
49 dict of scorers for classification without averaging of the classes.
50 See <a href=
51 "https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel">
52 scikit-learn docs</a>
53 device : Union[str, torch.device], default: 'cpu'
54 processing unit for use
55 random_state : int
56 A seed to set for pseudo-random functions
57 n_samples : int, default: None
58 """
59
60 def __init__(self,
61 train: VisionData = None,
62 test: VisionData = None,
63 model: nn.Module = None,
64 model_name: str = '',
65 scorers: Mapping[str, Metric] = None,
66 scorers_per_class: Mapping[str, Metric] = None,
67 device: Union[str, torch.device, None] = 'cpu',
68 random_state: int = 42,
69 n_samples: int = None
70 ):
71 # Validations
72 if train is None and test is None and model is None:
73 raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')
74 if test and not train:
75 raise DatasetValidationError('Can\'t initialize context with only test. if you have single dataset, '
76 'initialize it as train')
77 if train and test:
78 train.validate_shared_label(test)
79
80 self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))
81 self._prediction_formatter_error = {}
82
83 if model is not None:
84 if not isinstance(model, nn.Module):
85 logger.warning('Model is not a torch.nn.Module. Deepchecks can\'t validate that model is in '
86 'evaluation state.')
87 elif model.training:
88 raise DatasetValidationError('Model is not in evaluation state. Please set model training '
89 'parameter to False or run model.eval() before passing it.')
90
91 for dataset, dataset_type in zip([train, test], [DatasetKind.TRAIN, DatasetKind.TEST]):
92 if dataset is not None:
93 try:
94 dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)
95 msg = None
96 except DeepchecksNotImplementedError:
97 msg = f'infer_on_batch() was not implemented in {dataset_type} ' \
98 f'dataset, some checks will not run'
99 except ValidationError as ex:
100 msg = f'batch_to_images() was not implemented correctly in {dataset_type}, the ' \
101 f'validation has failed with the error: {ex}. To test your prediction formatting use the ' \
102 f'function `vision_data.validate_prediction(batch, model, device)`'
103
104 if msg:
105 self._prediction_formatter_error[dataset_type] = msg
106 logger.warning(msg)
107
108 # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.
109 # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user
110 # wants to run without random_state we need to forcefully shuffle (to have different results on different runs
111 # from the same VisionData object), and if there is a random_state the shuffle will always have same result
112 if train:
113 train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)
114 if test:
115 test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)
116
117 self._train = train
118 self._test = test
119 self._model = model
120 self._user_scorers = scorers
121 self._user_scorers_per_class = scorers_per_class
122 self._model_name = model_name
123 self.random_state = random_state
124
125 # Properties
126 # Validations note: We know train & test fit each other so all validations can be run only on train
127
128 @property
129 def train(self) -> VisionData:
130 """Return train if exists, otherwise raise error."""
131 if self._train is None:
132 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')
133 return self._train
134
135 @property
136 def test(self) -> VisionData:
137 """Return test if exists, otherwise raise error."""
138 if self._test is None:
139 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')
140 return self._test
141
142 @property
143 def model(self) -> nn.Module:
144 """Return & validate model if model exists, otherwise raise error."""
145 if self._model is None:
146 raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')
147 return self._model
148
149 @property
150 def model_name(self):
151 """Return model name."""
152 return self._model_name
153
154 @property
155 def device(self) -> torch.device:
156 """Return device specified by the user."""
157 return self._device
158
159 def have_test(self):
160 """Return whether there is test dataset defined."""
161 return self._test is not None
162
163 def assert_task_type(self, *expected_types: TaskType):
164 """Assert task_type matching given types."""
165 if self.train.task_type not in expected_types:
166 raise ModelValidationError(
167 f'Check is irrelevant for task of type {self.train.task_type}')
168 return True
169
170 def assert_predictions_valid(self, kind: DatasetKind = None):
171 """Assert that for given DatasetKind the model & dataset infer_on_batch return predictions in right format."""
172 error = self._prediction_formatter_error.get(kind)
173 if error:
174 raise DeepchecksValueError(error)
175
176 def get_data_by_kind(self, kind: DatasetKind):
177 """Return the relevant VisionData by given kind."""
178 if kind == DatasetKind.TRAIN:
179 return self.train
180 elif kind == DatasetKind.TEST:
181 return self.test
182 else:
183 raise DeepchecksValueError(f'Unexpected dataset kind {kind}')
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/vision/context.py b/deepchecks/vision/context.py
--- a/deepchecks/vision/context.py
+++ b/deepchecks/vision/context.py
@@ -97,7 +97,7 @@
msg = f'infer_on_batch() was not implemented in {dataset_type} ' \
f'dataset, some checks will not run'
except ValidationError as ex:
- msg = f'batch_to_images() was not implemented correctly in {dataset_type}, the ' \
+ msg = f'infer_on_batch() was not implemented correctly in {dataset_type}, the ' \
f'validation has failed with the error: {ex}. To test your prediction formatting use the ' \
f'function `vision_data.validate_prediction(batch, model, device)`'
|
{"golden_diff": "diff --git a/deepchecks/vision/context.py b/deepchecks/vision/context.py\n--- a/deepchecks/vision/context.py\n+++ b/deepchecks/vision/context.py\n@@ -97,7 +97,7 @@\n msg = f'infer_on_batch() was not implemented in {dataset_type} ' \\\n f'dataset, some checks will not run'\n except ValidationError as ex:\n- msg = f'batch_to_images() was not implemented correctly in {dataset_type}, the ' \\\n+ msg = f'infer_on_batch() was not implemented correctly in {dataset_type}, the ' \\\n f'validation has failed with the error: {ex}. To test your prediction formatting use the ' \\\n f'function `vision_data.validate_prediction(batch, model, device)`'\n", "issue": "[BUG] [CV] typo in context prediction validation\n**Describe the bug**\r\nit says batch_to_images instead of infer_on_batch\r\n\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module for base vision context.\"\"\"\nimport logging\nfrom typing import Mapping, Union\n\nimport torch\nfrom torch import nn\nfrom ignite.metrics import Metric\n\nfrom deepchecks.core import DatasetKind\nfrom deepchecks.vision.vision_data import VisionData, TaskType\nfrom deepchecks.core.errors import (\n DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,\n DeepchecksNotSupportedError, DeepchecksValueError, ValidationError\n)\n\n\n__all__ = ['Context']\n\n\nlogger = logging.getLogger('deepchecks')\n\n\nclass Context:\n \"\"\"Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.\n\n Parameters\n ----------\n train : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator was fitted on\n test : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator predicts on\n model : BasicModel , default: None\n A scikit-learn-compatible fitted estimator instance\n model_name: str , default: ''\n The name of the model\n scorers : Mapping[str, Metric] , default: None\n dict of scorers names to a Metric\n scorers_per_class : Mapping[str, Metric] , default: None\n dict of scorers for classification without averaging of the classes.\n See <a href=\n \"https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel\">\n scikit-learn docs</a>\n device : Union[str, torch.device], default: 'cpu'\n processing unit for use\n random_state : int\n A seed to set for pseudo-random functions\n n_samples : int, default: None\n \"\"\"\n\n def __init__(self,\n train: VisionData = None,\n test: VisionData = None,\n model: nn.Module = None,\n model_name: str = '',\n scorers: Mapping[str, Metric] = None,\n scorers_per_class: Mapping[str, Metric] = None,\n device: Union[str, torch.device, None] = 'cpu',\n random_state: int = 42,\n n_samples: int = None\n ):\n # Validations\n if train is None and test is None and model is None:\n raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')\n if test and not train:\n raise DatasetValidationError('Can\\'t initialize 
context with only test. if you have single dataset, '\n 'initialize it as train')\n if train and test:\n train.validate_shared_label(test)\n\n self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))\n self._prediction_formatter_error = {}\n\n if model is not None:\n if not isinstance(model, nn.Module):\n logger.warning('Model is not a torch.nn.Module. Deepchecks can\\'t validate that model is in '\n 'evaluation state.')\n elif model.training:\n raise DatasetValidationError('Model is not in evaluation state. Please set model training '\n 'parameter to False or run model.eval() before passing it.')\n\n for dataset, dataset_type in zip([train, test], [DatasetKind.TRAIN, DatasetKind.TEST]):\n if dataset is not None:\n try:\n dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)\n msg = None\n except DeepchecksNotImplementedError:\n msg = f'infer_on_batch() was not implemented in {dataset_type} ' \\\n f'dataset, some checks will not run'\n except ValidationError as ex:\n msg = f'batch_to_images() was not implemented correctly in {dataset_type}, the ' \\\n f'validation has failed with the error: {ex}. To test your prediction formatting use the ' \\\n f'function `vision_data.validate_prediction(batch, model, device)`'\n\n if msg:\n self._prediction_formatter_error[dataset_type] = msg\n logger.warning(msg)\n\n # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.\n # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user\n # wants to run without random_state we need to forcefully shuffle (to have different results on different runs\n # from the same VisionData object), and if there is a random_state the shuffle will always have same result\n if train:\n train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n if test:\n test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n\n self._train = train\n self._test = test\n self._model = model\n self._user_scorers = scorers\n self._user_scorers_per_class = scorers_per_class\n self._model_name = model_name\n self.random_state = random_state\n\n # Properties\n # Validations note: We know train & test fit each other so all validations can be run only on train\n\n @property\n def train(self) -> VisionData:\n \"\"\"Return train if exists, otherwise raise error.\"\"\"\n if self._train is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')\n return self._train\n\n @property\n def test(self) -> VisionData:\n \"\"\"Return test if exists, otherwise raise error.\"\"\"\n if self._test is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')\n return self._test\n\n @property\n def model(self) -> nn.Module:\n \"\"\"Return & validate model if model exists, otherwise raise error.\"\"\"\n if self._model is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')\n return self._model\n\n @property\n def model_name(self):\n \"\"\"Return model name.\"\"\"\n return self._model_name\n\n @property\n def device(self) -> torch.device:\n \"\"\"Return device specified by the user.\"\"\"\n return self._device\n\n def have_test(self):\n \"\"\"Return whether there is test dataset defined.\"\"\"\n return self._test is not None\n\n def assert_task_type(self, *expected_types: TaskType):\n \"\"\"Assert task_type matching given types.\"\"\"\n 
if self.train.task_type not in expected_types:\n raise ModelValidationError(\n f'Check is irrelevant for task of type {self.train.task_type}')\n return True\n\n def assert_predictions_valid(self, kind: DatasetKind = None):\n \"\"\"Assert that for given DatasetKind the model & dataset infer_on_batch return predictions in right format.\"\"\"\n error = self._prediction_formatter_error.get(kind)\n if error:\n raise DeepchecksValueError(error)\n\n def get_data_by_kind(self, kind: DatasetKind):\n \"\"\"Return the relevant VisionData by given kind.\"\"\"\n if kind == DatasetKind.TRAIN:\n return self.train\n elif kind == DatasetKind.TEST:\n return self.test\n else:\n raise DeepchecksValueError(f'Unexpected dataset kind {kind}')\n", "path": "deepchecks/vision/context.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module for base vision context.\"\"\"\nimport logging\nfrom typing import Mapping, Union\n\nimport torch\nfrom torch import nn\nfrom ignite.metrics import Metric\n\nfrom deepchecks.core import DatasetKind\nfrom deepchecks.vision.vision_data import VisionData, TaskType\nfrom deepchecks.core.errors import (\n DatasetValidationError, DeepchecksNotImplementedError, ModelValidationError,\n DeepchecksNotSupportedError, DeepchecksValueError, ValidationError\n)\n\n\n__all__ = ['Context']\n\n\nlogger = logging.getLogger('deepchecks')\n\n\nclass Context:\n \"\"\"Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly.\n\n Parameters\n ----------\n train : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator was fitted on\n test : VisionData , default: None\n Dataset or DataFrame object, representing data an estimator predicts on\n model : BasicModel , default: None\n A scikit-learn-compatible fitted estimator instance\n model_name: str , default: ''\n The name of the model\n scorers : Mapping[str, Metric] , default: None\n dict of scorers names to a Metric\n scorers_per_class : Mapping[str, Metric] , default: None\n dict of scorers for classification without averaging of the classes.\n See <a href=\n \"https://scikit-learn.org/stable/modules/model_evaluation.html#from-binary-to-multiclass-and-multilabel\">\n scikit-learn docs</a>\n device : Union[str, torch.device], default: 'cpu'\n processing unit for use\n random_state : int\n A seed to set for pseudo-random functions\n n_samples : int, default: None\n \"\"\"\n\n def __init__(self,\n train: VisionData = None,\n test: VisionData = None,\n model: nn.Module = None,\n model_name: str = '',\n scorers: Mapping[str, Metric] = None,\n scorers_per_class: Mapping[str, Metric] = None,\n device: Union[str, torch.device, None] = 'cpu',\n random_state: int = 42,\n n_samples: int = None\n ):\n # Validations\n if train is None and test is None and model is None:\n raise DeepchecksValueError('At least one dataset (or model) must be passed to the method!')\n if test and not train:\n raise DatasetValidationError('Can\\'t initialize context with only test. 
if you have single dataset, '\n 'initialize it as train')\n if train and test:\n train.validate_shared_label(test)\n\n self._device = torch.device(device) if isinstance(device, str) else (device if device else torch.device('cpu'))\n self._prediction_formatter_error = {}\n\n if model is not None:\n if not isinstance(model, nn.Module):\n logger.warning('Model is not a torch.nn.Module. Deepchecks can\\'t validate that model is in '\n 'evaluation state.')\n elif model.training:\n raise DatasetValidationError('Model is not in evaluation state. Please set model training '\n 'parameter to False or run model.eval() before passing it.')\n\n for dataset, dataset_type in zip([train, test], [DatasetKind.TRAIN, DatasetKind.TEST]):\n if dataset is not None:\n try:\n dataset.validate_prediction(next(iter(dataset.data_loader)), model, self._device)\n msg = None\n except DeepchecksNotImplementedError:\n msg = f'infer_on_batch() was not implemented in {dataset_type} ' \\\n f'dataset, some checks will not run'\n except ValidationError as ex:\n msg = f'infer_on_batch() was not implemented correctly in {dataset_type}, the ' \\\n f'validation has failed with the error: {ex}. To test your prediction formatting use the ' \\\n f'function `vision_data.validate_prediction(batch, model, device)`'\n\n if msg:\n self._prediction_formatter_error[dataset_type] = msg\n logger.warning(msg)\n\n # The copy does 2 things: Sample n_samples if parameter exists, and shuffle the data.\n # we shuffle because the data in VisionData is set to be sampled in a fixed order (in the init), so if the user\n # wants to run without random_state we need to forcefully shuffle (to have different results on different runs\n # from the same VisionData object), and if there is a random_state the shuffle will always have same result\n if train:\n train = train.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n if test:\n test = test.copy(shuffle=True, n_samples=n_samples, random_state=random_state)\n\n self._train = train\n self._test = test\n self._model = model\n self._user_scorers = scorers\n self._user_scorers_per_class = scorers_per_class\n self._model_name = model_name\n self.random_state = random_state\n\n # Properties\n # Validations note: We know train & test fit each other so all validations can be run only on train\n\n @property\n def train(self) -> VisionData:\n \"\"\"Return train if exists, otherwise raise error.\"\"\"\n if self._train is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without train dataset')\n return self._train\n\n @property\n def test(self) -> VisionData:\n \"\"\"Return test if exists, otherwise raise error.\"\"\"\n if self._test is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without test dataset')\n return self._test\n\n @property\n def model(self) -> nn.Module:\n \"\"\"Return & validate model if model exists, otherwise raise error.\"\"\"\n if self._model is None:\n raise DeepchecksNotSupportedError('Check is irrelevant for Datasets without model')\n return self._model\n\n @property\n def model_name(self):\n \"\"\"Return model name.\"\"\"\n return self._model_name\n\n @property\n def device(self) -> torch.device:\n \"\"\"Return device specified by the user.\"\"\"\n return self._device\n\n def have_test(self):\n \"\"\"Return whether there is test dataset defined.\"\"\"\n return self._test is not None\n\n def assert_task_type(self, *expected_types: TaskType):\n \"\"\"Assert task_type matching given types.\"\"\"\n if self.train.task_type 
not in expected_types:\n raise ModelValidationError(\n f'Check is irrelevant for task of type {self.train.task_type}')\n return True\n\n def assert_predictions_valid(self, kind: DatasetKind = None):\n \"\"\"Assert that for given DatasetKind the model & dataset infer_on_batch return predictions in right format.\"\"\"\n error = self._prediction_formatter_error.get(kind)\n if error:\n raise DeepchecksValueError(error)\n\n def get_data_by_kind(self, kind: DatasetKind):\n \"\"\"Return the relevant VisionData by given kind.\"\"\"\n if kind == DatasetKind.TRAIN:\n return self.train\n elif kind == DatasetKind.TEST:\n return self.test\n else:\n raise DeepchecksValueError(f'Unexpected dataset kind {kind}')\n", "path": "deepchecks/vision/context.py"}]}
| 2,450 | 170 |
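A minimal sketch of the validation pattern that the deepchecks patch above corrects; the second warning message now names the method actually being validated. Class and exception names here are simplified stand-ins, not the deepchecks originals:

```python
class NotImplementedInDataset(Exception):    # stand-in for DeepchecksNotImplementedError
    pass

class PredictionValidationError(Exception):  # stand-in for deepchecks' ValidationError
    pass

def prediction_formatter_message(dataset, dataset_type, model, device):
    """Return a warning string when infer_on_batch() is missing or broken, else None."""
    try:
        dataset.validate_prediction(next(iter(dataset.data_loader)), model, device)
        return None
    except NotImplementedInDataset:
        return (f'infer_on_batch() was not implemented in {dataset_type} '
                f'dataset, some checks will not run')
    except PredictionValidationError as ex:
        # The fix above: this message also names infer_on_batch(), not batch_to_images().
        return (f'infer_on_batch() was not implemented correctly in {dataset_type}, '
                f'the validation has failed with the error: {ex}')
```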
gh_patches_debug_11224 | rasdani/github-patches | git_diff | ethereum__web3.py-3248 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Web3.isAddress doesn't work for non prefixed checksumed values
* Version: 4.0.0b11
* Python: 3.6
* OS: linux
### What was wrong?
As stated in the docs http://web3py.readthedocs.io/en/latest/overview.html#Web3.isAddress the function Web3.isAddress(value) should **allow both 0x prefixed and non prefixed values**.
If the address is not checksumed, it's ok not to have the **0x**:
```
>>> Web3.isAddress('d3cda913deb6f67967b99d67acdfa1712c293601')
>>> True
```
But if it's checksumed
```
>>> Web3.isAddress('d3CdA913deB6f67967B99D67aCDFa1712C293601')
>>> False
```
No problem if we add the **0x**:
```
>>> Web3.isAddress('0xd3CdA913deB6f67967B99D67aCDFa1712C293601')
>>> True
```
### How can it be fixed?
Changing the documentation to state that checksumed addresses must have 0x or changing the function to accept checksumed addresses with 0x. I would just remove 0x at the beginning of the function (if found) and work with the address as that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import (
3 find_packages,
4 setup,
5 )
6
7 extras_require = {
8 "tester": [
9 "eth-tester[py-evm]==v0.10.0-b.1",
10 "py-geth>=4.1.0",
11 ],
12 "linter": [
13 "black>=22.1.0",
14 "flake8==3.8.3",
15 "isort>=5.11.0",
16 "mypy==1.4.1",
17 "types-setuptools>=57.4.4",
18 "types-requests>=2.26.1",
19 "types-protobuf==3.19.13",
20 ],
21 "docs": [
22 "sphinx>=5.3.0",
23 "sphinx_rtd_theme>=1.0.0",
24 "towncrier>=21,<22",
25 ],
26 "dev": [
27 "bumpversion",
28 "flaky>=3.7.0",
29 "hypothesis>=3.31.2",
30 "importlib-metadata<5.0;python_version<'3.8'",
31 "pytest>=7.0.0",
32 "pytest-asyncio>=0.18.1,<0.23",
33 "pytest-mock>=1.10",
34 "pytest-watch>=4.2",
35 "pytest-xdist>=1.29",
36 "setuptools>=38.6.0",
37 "tox>=3.18.0",
38 "tqdm>4.32",
39 "twine>=1.13",
40 "when-changed>=0.3.0",
41 "build>=0.9.0",
42 ],
43 "ipfs": [
44 "ipfshttpclient==0.8.0a2",
45 ],
46 }
47
48 extras_require["dev"] = (
49 extras_require["tester"]
50 + extras_require["linter"]
51 + extras_require["docs"]
52 + extras_require["ipfs"]
53 + extras_require["dev"]
54 )
55
56 with open("./README.md") as readme:
57 long_description = readme.read()
58
59 setup(
60 name="web3",
61 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
62 version="6.14.0",
63 description="""web3.py""",
64 long_description_content_type="text/markdown",
65 long_description=long_description,
66 author="The Ethereum Foundation",
67 author_email="[email protected]",
68 url="https://github.com/ethereum/web3.py",
69 include_package_data=True,
70 install_requires=[
71 "aiohttp>=3.7.4.post0",
72 "eth-abi>=4.0.0",
73 "eth-account>=0.8.0",
74 "eth-hash[pycryptodome]>=0.5.1",
75 "eth-typing>=3.0.0",
76 "eth-utils>=2.1.0",
77 "hexbytes>=0.1.0,<0.4.0",
78 "jsonschema>=4.0.0",
79 "protobuf>=4.21.6",
80 "pydantic>=2.4.0",
81 "pywin32>=223;platform_system=='Windows'",
82 "requests>=2.16.0",
83 "typing-extensions>=4.0.1",
84 "websockets>=10.0.0",
85 "pyunormalize>=15.0.0",
86 ],
87 python_requires=">=3.8",
88 extras_require=extras_require,
89 py_modules=["web3", "ens", "ethpm"],
90 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]},
91 license="MIT",
92 zip_safe=False,
93 keywords="ethereum",
94 packages=find_packages(exclude=["tests", "tests.*"]),
95 package_data={"web3": ["py.typed"]},
96 classifiers=[
97 "Development Status :: 5 - Production/Stable",
98 "Intended Audience :: Developers",
99 "License :: OSI Approved :: MIT License",
100 "Natural Language :: English",
101 "Programming Language :: Python :: 3",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Programming Language :: Python :: 3.10",
105 "Programming Language :: Python :: 3.11",
106 ],
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
extras_require = {
"tester": [
- "eth-tester[py-evm]==v0.10.0-b.1",
+ "eth-tester[py-evm]==v0.10.0-b.3",
"py-geth>=4.1.0",
],
"linter": [
@@ -73,7 +73,7 @@
"eth-account>=0.8.0",
"eth-hash[pycryptodome]>=0.5.1",
"eth-typing>=3.0.0",
- "eth-utils>=2.1.0",
+ "eth-utils>=4.0.0",
"hexbytes>=0.1.0,<0.4.0",
"jsonschema>=4.0.0",
"protobuf>=4.21.6",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,7 @@\n \n extras_require = {\n \"tester\": [\n- \"eth-tester[py-evm]==v0.10.0-b.1\",\n+ \"eth-tester[py-evm]==v0.10.0-b.3\",\n \"py-geth>=4.1.0\",\n ],\n \"linter\": [\n@@ -73,7 +73,7 @@\n \"eth-account>=0.8.0\",\n \"eth-hash[pycryptodome]>=0.5.1\",\n \"eth-typing>=3.0.0\",\n- \"eth-utils>=2.1.0\",\n+ \"eth-utils>=4.0.0\",\n \"hexbytes>=0.1.0,<0.4.0\",\n \"jsonschema>=4.0.0\",\n \"protobuf>=4.21.6\",\n", "issue": "Web3.isAddress doesn't work for non prefixed checksumed values\n* Version: 4.0.0b11\r\n* Python: 3.6\r\n* OS: linux\r\n\r\n### What was wrong?\r\n\r\nAs stated in the docs http://web3py.readthedocs.io/en/latest/overview.html#Web3.isAddress the function Web3.isAddress(value) should **allow both 0x prefixed and non prefixed values**.\r\n\r\nIf the address is not checksumed, it's ok not to have the **0x**:\r\n\r\n```\r\n>>> Web3.isAddress('d3cda913deb6f67967b99d67acdfa1712c293601')\r\n>>> True\r\n```\r\n\r\nBut if it's checksumed\r\n\r\n```\r\n>>> Web3.isAddress('d3CdA913deB6f67967B99D67aCDFa1712C293601')\r\n>>> False\r\n```\r\n\r\nNo problem if we add the **0x**:\r\n\r\n```\r\n>>> Web3.isAddress('0xd3CdA913deB6f67967B99D67aCDFa1712C293601')\r\n>>> True\r\n```\r\n\r\n### How can it be fixed?\r\n\r\nChanging the documentation to state that checksumed addresses must have 0x or changing the function to accept checksumed addresses with 0x. I would just remove 0x at the beginning of the function (if found) and work with the address as that. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n \"tester\": [\n \"eth-tester[py-evm]==v0.10.0-b.1\",\n \"py-geth>=4.1.0\",\n ],\n \"linter\": [\n \"black>=22.1.0\",\n \"flake8==3.8.3\",\n \"isort>=5.11.0\",\n \"mypy==1.4.1\",\n \"types-setuptools>=57.4.4\",\n \"types-requests>=2.26.1\",\n \"types-protobuf==3.19.13\",\n ],\n \"docs\": [\n \"sphinx>=5.3.0\",\n \"sphinx_rtd_theme>=1.0.0\",\n \"towncrier>=21,<22\",\n ],\n \"dev\": [\n \"bumpversion\",\n \"flaky>=3.7.0\",\n \"hypothesis>=3.31.2\",\n \"importlib-metadata<5.0;python_version<'3.8'\",\n \"pytest>=7.0.0\",\n \"pytest-asyncio>=0.18.1,<0.23\",\n \"pytest-mock>=1.10\",\n \"pytest-watch>=4.2\",\n \"pytest-xdist>=1.29\",\n \"setuptools>=38.6.0\",\n \"tox>=3.18.0\",\n \"tqdm>4.32\",\n \"twine>=1.13\",\n \"when-changed>=0.3.0\",\n \"build>=0.9.0\",\n ],\n \"ipfs\": [\n \"ipfshttpclient==0.8.0a2\",\n ],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"tester\"]\n + extras_require[\"linter\"]\n + extras_require[\"docs\"]\n + extras_require[\"ipfs\"]\n + extras_require[\"dev\"]\n)\n\nwith open(\"./README.md\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"web3\",\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version=\"6.14.0\",\n description=\"\"\"web3.py\"\"\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n author=\"The Ethereum Foundation\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ethereum/web3.py\",\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0\",\n \"eth-abi>=4.0.0\",\n \"eth-account>=0.8.0\",\n \"eth-hash[pycryptodome]>=0.5.1\",\n \"eth-typing>=3.0.0\",\n \"eth-utils>=2.1.0\",\n \"hexbytes>=0.1.0,<0.4.0\",\n \"jsonschema>=4.0.0\",\n \"protobuf>=4.21.6\",\n \"pydantic>=2.4.0\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0\",\n \"typing-extensions>=4.0.1\",\n \"websockets>=10.0.0\",\n \"pyunormalize>=15.0.0\",\n ],\n python_requires=\">=3.8\",\n extras_require=extras_require,\n py_modules=[\"web3\", \"ens\", \"ethpm\"],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords=\"ethereum\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n \"tester\": [\n \"eth-tester[py-evm]==v0.10.0-b.3\",\n \"py-geth>=4.1.0\",\n ],\n \"linter\": [\n \"black>=22.1.0\",\n \"flake8==3.8.3\",\n \"isort>=5.11.0\",\n \"mypy==1.4.1\",\n \"types-setuptools>=57.4.4\",\n \"types-requests>=2.26.1\",\n \"types-protobuf==3.19.13\",\n ],\n \"docs\": [\n \"sphinx>=5.3.0\",\n \"sphinx_rtd_theme>=1.0.0\",\n \"towncrier>=21,<22\",\n ],\n \"dev\": [\n \"bumpversion\",\n \"flaky>=3.7.0\",\n \"hypothesis>=3.31.2\",\n \"importlib-metadata<5.0;python_version<'3.8'\",\n \"pytest>=7.0.0\",\n \"pytest-asyncio>=0.18.1,<0.23\",\n \"pytest-mock>=1.10\",\n \"pytest-watch>=4.2\",\n \"pytest-xdist>=1.29\",\n \"setuptools>=38.6.0\",\n \"tox>=3.18.0\",\n \"tqdm>4.32\",\n \"twine>=1.13\",\n \"when-changed>=0.3.0\",\n \"build>=0.9.0\",\n ],\n \"ipfs\": [\n \"ipfshttpclient==0.8.0a2\",\n ],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"tester\"]\n + extras_require[\"linter\"]\n + extras_require[\"docs\"]\n + extras_require[\"ipfs\"]\n + extras_require[\"dev\"]\n)\n\nwith open(\"./README.md\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"web3\",\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version=\"6.14.0\",\n description=\"\"\"web3.py\"\"\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n author=\"The Ethereum Foundation\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ethereum/web3.py\",\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0\",\n \"eth-abi>=4.0.0\",\n \"eth-account>=0.8.0\",\n \"eth-hash[pycryptodome]>=0.5.1\",\n \"eth-typing>=3.0.0\",\n \"eth-utils>=4.0.0\",\n \"hexbytes>=0.1.0,<0.4.0\",\n \"jsonschema>=4.0.0\",\n \"protobuf>=4.21.6\",\n \"pydantic>=2.4.0\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0\",\n \"typing-extensions>=4.0.1\",\n \"websockets>=10.0.0\",\n \"pyunormalize>=15.0.0\",\n ],\n python_requires=\">=3.8\",\n extras_require=extras_require,\n py_modules=[\"web3\", \"ens\", \"ethpm\"],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords=\"ethereum\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n)\n", "path": "setup.py"}]}
| 1,781 | 219 |
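The address-validation behaviour described in the web3.py issue above can be reproduced and worked around with eth-utils directly. A rough sketch, assuming a recent eth-utils is installed; the helper name is an illustration, not a web3.py API:

```python
from eth_utils import add_0x_prefix, is_address

def is_address_lenient(value: str) -> bool:
    """Accept both 0x-prefixed and bare hex addresses, checksummed or not."""
    # Normalising to a 0x-prefixed string sidesteps the case reported in the issue,
    # where a checksummed address without the prefix is rejected.
    return is_address(add_0x_prefix(value))

assert is_address_lenient('d3cda913deb6f67967b99d67acdfa1712c293601')
assert is_address_lenient('0xd3CdA913deB6f67967B99D67aCDFa1712C293601')
```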
gh_patches_debug_17300 | rasdani/github-patches | git_diff | techmatters__terraso-backend-889 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime
## Description
When running `make test`, many warnings of this form are observed:
```
/home/terraso/.local/lib/python3.11/site-packages/django/db/models/fields/__init__.py:1595: RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime (2023-07-11 22:39:48.700825) while time zone support is active.
warnings.warn(
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/audit_logs/services.py`
Content:
```
1 import typing
2 from datetime import datetime
3 from enum import Enum
4
5 from django.contrib.contenttypes.models import ContentType
6 from django.core.paginator import Paginator
7 from django.db import transaction
8 from django.db.models.query import QuerySet
9
10 from apps.core.models import User
11
12 from . import api, models
13
14 TEMPLATE = "{client_time} - {user} {action} {resource}"
15
16
17 class _AuditLogService:
18 """
19 AuditLogService implements the AuditLog protocol
20 """
21
22 def log(
23 self,
24 user: User,
25 action: api.ACTIONS,
26 resource: object,
27 metadata: typing.Optional[dict[str, any]] = None,
28 client_time: typing.Optional[datetime] = None,
29 ) -> None:
30 """
31 log logs an action performed by a user on a resource
32 example:
33 log(user, "create", resource, client_time=1234567890)
34 :param client_time:
35 :param metadata:
36 :param action:
37 :param user:
38 :type resource: object
39
40 """
41 if not hasattr(user, "id"):
42 raise ValueError("Invalid user")
43
44 get_user_readable = getattr(user, "human_readable", None)
45 user_readable = get_user_readable() if callable(get_user_readable) else user.full_name()
46
47 if not isinstance(action, Enum) or not hasattr(models.Events, action.value):
48 raise ValueError("Invalid action")
49
50 resource_id = resource.id if hasattr(resource, "id") else None
51 if resource_id is None:
52 raise ValueError("Invalid resource")
53
54 get_resource_human_readable = getattr(resource, "human_readable", None)
55 if callable(get_resource_human_readable):
56 resource_human_readable = get_resource_human_readable()
57 else:
58 resource_human_readable = resource_id
59
60 content_type = ContentType.objects.get_for_model(resource)
61 resource_obj = resource
62
63 resource_repr = resource.__dict__.__str__()
64
65 if metadata is None:
66 metadata = {}
67
68 with transaction.atomic():
69 log = models.Log(
70 user=user,
71 event=action.value,
72 resource_id=resource_id,
73 resource_content_type=content_type,
74 resource_object=resource_obj,
75 resource_json_repr=resource_repr,
76 resource_human_readable=str(resource_human_readable),
77 user_human_readable=str(user_readable),
78 )
79
80 if client_time is None:
81 client_time = datetime.now()
82 log.client_timestamp = client_time
83
84 log.metadata = metadata
85 log.save()
86
87
88 class LogData:
89 """
90 LazyPaginator implements the Paginator protocol
91 """
92
93 def __init__(self, data: QuerySet):
94 self.data = data
95
96 def get_paginator(self, page_size: int = 10):
97 return Paginator(self.data, page_size)
98
99 def __len__(self):
100 return len(self.data)
101
102 def __iter__(self):
103 return iter(self.data)
104
105
106 def new_audit_logger() -> api.AuditLog:
107 """
108 new_audit_logger creates a new audit log
109 """
110 return _AuditLogService()
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/terraso_backend/apps/audit_logs/services.py b/terraso_backend/apps/audit_logs/services.py
--- a/terraso_backend/apps/audit_logs/services.py
+++ b/terraso_backend/apps/audit_logs/services.py
@@ -2,6 +2,7 @@
from datetime import datetime
from enum import Enum
+from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.db import transaction
@@ -79,7 +80,12 @@
if client_time is None:
client_time = datetime.now()
- log.client_timestamp = client_time
+ if settings.USE_TZ:
+ from django.utils.timezone import make_aware
+
+ log.client_timestamp = make_aware(client_time)
+ else:
+ log.client_timestamp = client_time
log.metadata = metadata
log.save()
|
{"golden_diff": "diff --git a/terraso_backend/apps/audit_logs/services.py b/terraso_backend/apps/audit_logs/services.py\n--- a/terraso_backend/apps/audit_logs/services.py\n+++ b/terraso_backend/apps/audit_logs/services.py\n@@ -2,6 +2,7 @@\n from datetime import datetime\n from enum import Enum\n \n+from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.core.paginator import Paginator\n from django.db import transaction\n@@ -79,7 +80,12 @@\n \n if client_time is None:\n client_time = datetime.now()\n- log.client_timestamp = client_time\n+ if settings.USE_TZ:\n+ from django.utils.timezone import make_aware\n+\n+ log.client_timestamp = make_aware(client_time)\n+ else:\n+ log.client_timestamp = client_time\n \n log.metadata = metadata\n log.save()\n", "issue": "RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime\n## Description\r\nWhen running `make test`, many warnings of this form are observed:\r\n```\r\n /home/terraso/.local/lib/python3.11/site-packages/django/db/models/fields/__init__.py:1595: RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime (2023-07-11 22:39:48.700825) while time zone support is active.\r\n warnings.warn(\r\n```\n", "before_files": [{"content": "import typing\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models.query import QuerySet\n\nfrom apps.core.models import User\n\nfrom . import api, models\n\nTEMPLATE = \"{client_time} - {user} {action} {resource}\"\n\n\nclass _AuditLogService:\n \"\"\"\n AuditLogService implements the AuditLog protocol\n \"\"\"\n\n def log(\n self,\n user: User,\n action: api.ACTIONS,\n resource: object,\n metadata: typing.Optional[dict[str, any]] = None,\n client_time: typing.Optional[datetime] = None,\n ) -> None:\n \"\"\"\n log logs an action performed by a user on a resource\n example:\n log(user, \"create\", resource, client_time=1234567890)\n :param client_time:\n :param metadata:\n :param action:\n :param user:\n :type resource: object\n\n \"\"\"\n if not hasattr(user, \"id\"):\n raise ValueError(\"Invalid user\")\n\n get_user_readable = getattr(user, \"human_readable\", None)\n user_readable = get_user_readable() if callable(get_user_readable) else user.full_name()\n\n if not isinstance(action, Enum) or not hasattr(models.Events, action.value):\n raise ValueError(\"Invalid action\")\n\n resource_id = resource.id if hasattr(resource, \"id\") else None\n if resource_id is None:\n raise ValueError(\"Invalid resource\")\n\n get_resource_human_readable = getattr(resource, \"human_readable\", None)\n if callable(get_resource_human_readable):\n resource_human_readable = get_resource_human_readable()\n else:\n resource_human_readable = resource_id\n\n content_type = ContentType.objects.get_for_model(resource)\n resource_obj = resource\n\n resource_repr = resource.__dict__.__str__()\n\n if metadata is None:\n metadata = {}\n\n with transaction.atomic():\n log = models.Log(\n user=user,\n event=action.value,\n resource_id=resource_id,\n resource_content_type=content_type,\n resource_object=resource_obj,\n resource_json_repr=resource_repr,\n resource_human_readable=str(resource_human_readable),\n user_human_readable=str(user_readable),\n )\n\n if client_time is None:\n client_time = datetime.now()\n log.client_timestamp = client_time\n\n log.metadata = metadata\n 
log.save()\n\n\nclass LogData:\n \"\"\"\n LazyPaginator implements the Paginator protocol\n \"\"\"\n\n def __init__(self, data: QuerySet):\n self.data = data\n\n def get_paginator(self, page_size: int = 10):\n return Paginator(self.data, page_size)\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n\ndef new_audit_logger() -> api.AuditLog:\n \"\"\"\n new_audit_logger creates a new audit log\n \"\"\"\n return _AuditLogService()\n", "path": "terraso_backend/apps/audit_logs/services.py"}], "after_files": [{"content": "import typing\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models.query import QuerySet\n\nfrom apps.core.models import User\n\nfrom . import api, models\n\nTEMPLATE = \"{client_time} - {user} {action} {resource}\"\n\n\nclass _AuditLogService:\n \"\"\"\n AuditLogService implements the AuditLog protocol\n \"\"\"\n\n def log(\n self,\n user: User,\n action: api.ACTIONS,\n resource: object,\n metadata: typing.Optional[dict[str, any]] = None,\n client_time: typing.Optional[datetime] = None,\n ) -> None:\n \"\"\"\n log logs an action performed by a user on a resource\n example:\n log(user, \"create\", resource, client_time=1234567890)\n :param client_time:\n :param metadata:\n :param action:\n :param user:\n :type resource: object\n\n \"\"\"\n if not hasattr(user, \"id\"):\n raise ValueError(\"Invalid user\")\n\n get_user_readable = getattr(user, \"human_readable\", None)\n user_readable = get_user_readable() if callable(get_user_readable) else user.full_name()\n\n if not isinstance(action, Enum) or not hasattr(models.Events, action.value):\n raise ValueError(\"Invalid action\")\n\n resource_id = resource.id if hasattr(resource, \"id\") else None\n if resource_id is None:\n raise ValueError(\"Invalid resource\")\n\n get_resource_human_readable = getattr(resource, \"human_readable\", None)\n if callable(get_resource_human_readable):\n resource_human_readable = get_resource_human_readable()\n else:\n resource_human_readable = resource_id\n\n content_type = ContentType.objects.get_for_model(resource)\n resource_obj = resource\n\n resource_repr = resource.__dict__.__str__()\n\n if metadata is None:\n metadata = {}\n\n with transaction.atomic():\n log = models.Log(\n user=user,\n event=action.value,\n resource_id=resource_id,\n resource_content_type=content_type,\n resource_object=resource_obj,\n resource_json_repr=resource_repr,\n resource_human_readable=str(resource_human_readable),\n user_human_readable=str(user_readable),\n )\n\n if client_time is None:\n client_time = datetime.now()\n if settings.USE_TZ:\n from django.utils.timezone import make_aware\n\n log.client_timestamp = make_aware(client_time)\n else:\n log.client_timestamp = client_time\n\n log.metadata = metadata\n log.save()\n\n\nclass LogData:\n \"\"\"\n LazyPaginator implements the Paginator protocol\n \"\"\"\n\n def __init__(self, data: QuerySet):\n self.data = data\n\n def get_paginator(self, page_size: int = 10):\n return Paginator(self.data, page_size)\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n\ndef new_audit_logger() -> api.AuditLog:\n \"\"\"\n new_audit_logger creates a new audit log\n \"\"\"\n return _AuditLogService()\n", "path": "terraso_backend/apps/audit_logs/services.py"}]}
| 1,256 | 198 |
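The timezone handling introduced by the terraso patch above follows a common Django pattern: only attach a timezone when time-zone support is active. A dependency-free sketch of the same branch (Django's `make_aware` would use the project's current timezone; UTC stands in here only to keep the example runnable without Django settings):

```python
from datetime import datetime, timezone
from typing import Optional

def ensure_aware(client_time: Optional[datetime], use_tz: bool = True) -> datetime:
    """Default to 'now' and attach a timezone when USE_TZ-style support is on."""
    if client_time is None:
        client_time = datetime.now()
    if use_tz and client_time.tzinfo is None:
        # Mirrors make_aware(client_time) in the patch; avoids the naive-datetime warning.
        client_time = client_time.replace(tzinfo=timezone.utc)
    return client_time

print(ensure_aware(None).tzinfo)  # UTC, so DateTimeField no longer receives a naive value
```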
gh_patches_debug_10615 | rasdani/github-patches | git_diff | pandas-dev__pandas-14007 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DEPR: deprecate SparseList
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/sparse/list.py`
Content:
```
1 import numpy as np
2 from pandas.core.base import PandasObject
3 from pandas.formats.printing import pprint_thing
4
5 from pandas.types.common import is_scalar
6 from pandas.sparse.array import SparseArray
7 import pandas._sparse as splib
8
9
10 class SparseList(PandasObject):
11
12 """
13 Data structure for accumulating data to be converted into a
14 SparseArray. Has similar API to the standard Python list
15
16 Parameters
17 ----------
18 data : scalar or array-like
19 fill_value : scalar, default NaN
20 """
21
22 def __init__(self, data=None, fill_value=np.nan):
23 self.fill_value = fill_value
24 self._chunks = []
25
26 if data is not None:
27 self.append(data)
28
29 def __unicode__(self):
30 contents = '\n'.join(repr(c) for c in self._chunks)
31 return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
32
33 def __len__(self):
34 return sum(len(c) for c in self._chunks)
35
36 def __getitem__(self, i):
37 if i < 0:
38 if i + len(self) < 0: # pragma: no cover
39 raise ValueError('%d out of range' % i)
40 i += len(self)
41
42 passed = 0
43 j = 0
44 while i >= passed + len(self._chunks[j]):
45 passed += len(self._chunks[j])
46 j += 1
47 return self._chunks[j][i - passed]
48
49 def __setitem__(self, i, value):
50 raise NotImplementedError
51
52 @property
53 def nchunks(self):
54 return len(self._chunks)
55
56 @property
57 def is_consolidated(self):
58 return self.nchunks == 1
59
60 def consolidate(self, inplace=True):
61 """
62 Internally consolidate chunks of data
63
64 Parameters
65 ----------
66 inplace : boolean, default True
67 Modify the calling object instead of constructing a new one
68
69 Returns
70 -------
71 splist : SparseList
72 If inplace=False, new object, otherwise reference to existing
73 object
74 """
75 if not inplace:
76 result = self.copy()
77 else:
78 result = self
79
80 if result.is_consolidated:
81 return result
82
83 result._consolidate_inplace()
84 return result
85
86 def _consolidate_inplace(self):
87 new_values = np.concatenate([c.sp_values for c in self._chunks])
88 new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
89 new_arr = SparseArray(new_values, sparse_index=new_index,
90 fill_value=self.fill_value)
91 self._chunks = [new_arr]
92
93 def copy(self):
94 """
95 Return copy of the list
96
97 Returns
98 -------
99 new_list : SparseList
100 """
101 new_splist = SparseList(fill_value=self.fill_value)
102 new_splist._chunks = list(self._chunks)
103 return new_splist
104
105 def to_array(self):
106 """
107 Return SparseArray from data stored in the SparseList
108
109 Returns
110 -------
111 sparr : SparseArray
112 """
113 self.consolidate(inplace=True)
114 return self._chunks[0]
115
116 def append(self, value):
117 """
118 Append element or array-like chunk of data to the SparseList
119
120 Parameters
121 ----------
122 value: scalar or array-like
123 """
124 if is_scalar(value):
125 value = [value]
126
127 sparr = SparseArray(value, fill_value=self.fill_value)
128 self._chunks.append(sparr)
129 self._consolidated = False
130
131
132 def _concat_sparse_indexes(indexes):
133 all_indices = []
134 total_length = 0
135
136 for index in indexes:
137 # increment by offset
138 inds = index.to_int_index().indices + total_length
139
140 all_indices.append(inds)
141 total_length += index.length
142
143 return splib.IntIndex(total_length, np.concatenate(all_indices))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py
--- a/pandas/sparse/list.py
+++ b/pandas/sparse/list.py
@@ -1,3 +1,4 @@
+import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.formats.printing import pprint_thing
@@ -20,6 +21,11 @@
"""
def __init__(self, data=None, fill_value=np.nan):
+
+ # see gh-13784
+ warnings.warn("SparseList is deprecated and will be removed "
+ "in a future version", FutureWarning, stacklevel=2)
+
self.fill_value = fill_value
self._chunks = []
|
{"golden_diff": "diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py\n--- a/pandas/sparse/list.py\n+++ b/pandas/sparse/list.py\n@@ -1,3 +1,4 @@\n+import warnings\n import numpy as np\n from pandas.core.base import PandasObject\n from pandas.formats.printing import pprint_thing\n@@ -20,6 +21,11 @@\n \"\"\"\n \n def __init__(self, data=None, fill_value=np.nan):\n+\n+ # see gh-13784\n+ warnings.warn(\"SparseList is deprecated and will be removed \"\n+ \"in a future version\", FutureWarning, stacklevel=2)\n+\n self.fill_value = fill_value\n self._chunks = []\n", "issue": "DEPR: deprecate SparseList\n\n", "before_files": [{"content": "import numpy as np\nfrom pandas.core.base import PandasObject\nfrom pandas.formats.printing import pprint_thing\n\nfrom pandas.types.common import is_scalar\nfrom pandas.sparse.array import SparseArray\nimport pandas._sparse as splib\n\n\nclass SparseList(PandasObject):\n\n \"\"\"\n Data structure for accumulating data to be converted into a\n SparseArray. Has similar API to the standard Python list\n\n Parameters\n ----------\n data : scalar or array-like\n fill_value : scalar, default NaN\n \"\"\"\n\n def __init__(self, data=None, fill_value=np.nan):\n self.fill_value = fill_value\n self._chunks = []\n\n if data is not None:\n self.append(data)\n\n def __unicode__(self):\n contents = '\\n'.join(repr(c) for c in self._chunks)\n return '%s\\n%s' % (object.__repr__(self), pprint_thing(contents))\n\n def __len__(self):\n return sum(len(c) for c in self._chunks)\n\n def __getitem__(self, i):\n if i < 0:\n if i + len(self) < 0: # pragma: no cover\n raise ValueError('%d out of range' % i)\n i += len(self)\n\n passed = 0\n j = 0\n while i >= passed + len(self._chunks[j]):\n passed += len(self._chunks[j])\n j += 1\n return self._chunks[j][i - passed]\n\n def __setitem__(self, i, value):\n raise NotImplementedError\n\n @property\n def nchunks(self):\n return len(self._chunks)\n\n @property\n def is_consolidated(self):\n return self.nchunks == 1\n\n def consolidate(self, inplace=True):\n \"\"\"\n Internally consolidate chunks of data\n\n Parameters\n ----------\n inplace : boolean, default True\n Modify the calling object instead of constructing a new one\n\n Returns\n -------\n splist : SparseList\n If inplace=False, new object, otherwise reference to existing\n object\n \"\"\"\n if not inplace:\n result = self.copy()\n else:\n result = self\n\n if result.is_consolidated:\n return result\n\n result._consolidate_inplace()\n return result\n\n def _consolidate_inplace(self):\n new_values = np.concatenate([c.sp_values for c in self._chunks])\n new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])\n new_arr = SparseArray(new_values, sparse_index=new_index,\n fill_value=self.fill_value)\n self._chunks = [new_arr]\n\n def copy(self):\n \"\"\"\n Return copy of the list\n\n Returns\n -------\n new_list : SparseList\n \"\"\"\n new_splist = SparseList(fill_value=self.fill_value)\n new_splist._chunks = list(self._chunks)\n return new_splist\n\n def to_array(self):\n \"\"\"\n Return SparseArray from data stored in the SparseList\n\n Returns\n -------\n sparr : SparseArray\n \"\"\"\n self.consolidate(inplace=True)\n return self._chunks[0]\n\n def append(self, value):\n \"\"\"\n Append element or array-like chunk of data to the SparseList\n\n Parameters\n ----------\n value: scalar or array-like\n \"\"\"\n if is_scalar(value):\n value = [value]\n\n sparr = SparseArray(value, fill_value=self.fill_value)\n self._chunks.append(sparr)\n self._consolidated = 
False\n\n\ndef _concat_sparse_indexes(indexes):\n all_indices = []\n total_length = 0\n\n for index in indexes:\n # increment by offset\n inds = index.to_int_index().indices + total_length\n\n all_indices.append(inds)\n total_length += index.length\n\n return splib.IntIndex(total_length, np.concatenate(all_indices))\n", "path": "pandas/sparse/list.py"}], "after_files": [{"content": "import warnings\nimport numpy as np\nfrom pandas.core.base import PandasObject\nfrom pandas.formats.printing import pprint_thing\n\nfrom pandas.types.common import is_scalar\nfrom pandas.sparse.array import SparseArray\nimport pandas._sparse as splib\n\n\nclass SparseList(PandasObject):\n\n \"\"\"\n Data structure for accumulating data to be converted into a\n SparseArray. Has similar API to the standard Python list\n\n Parameters\n ----------\n data : scalar or array-like\n fill_value : scalar, default NaN\n \"\"\"\n\n def __init__(self, data=None, fill_value=np.nan):\n\n # see gh-13784\n warnings.warn(\"SparseList is deprecated and will be removed \"\n \"in a future version\", FutureWarning, stacklevel=2)\n\n self.fill_value = fill_value\n self._chunks = []\n\n if data is not None:\n self.append(data)\n\n def __unicode__(self):\n contents = '\\n'.join(repr(c) for c in self._chunks)\n return '%s\\n%s' % (object.__repr__(self), pprint_thing(contents))\n\n def __len__(self):\n return sum(len(c) for c in self._chunks)\n\n def __getitem__(self, i):\n if i < 0:\n if i + len(self) < 0: # pragma: no cover\n raise ValueError('%d out of range' % i)\n i += len(self)\n\n passed = 0\n j = 0\n while i >= passed + len(self._chunks[j]):\n passed += len(self._chunks[j])\n j += 1\n return self._chunks[j][i - passed]\n\n def __setitem__(self, i, value):\n raise NotImplementedError\n\n @property\n def nchunks(self):\n return len(self._chunks)\n\n @property\n def is_consolidated(self):\n return self.nchunks == 1\n\n def consolidate(self, inplace=True):\n \"\"\"\n Internally consolidate chunks of data\n\n Parameters\n ----------\n inplace : boolean, default True\n Modify the calling object instead of constructing a new one\n\n Returns\n -------\n splist : SparseList\n If inplace=False, new object, otherwise reference to existing\n object\n \"\"\"\n if not inplace:\n result = self.copy()\n else:\n result = self\n\n if result.is_consolidated:\n return result\n\n result._consolidate_inplace()\n return result\n\n def _consolidate_inplace(self):\n new_values = np.concatenate([c.sp_values for c in self._chunks])\n new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])\n new_arr = SparseArray(new_values, sparse_index=new_index,\n fill_value=self.fill_value)\n self._chunks = [new_arr]\n\n def copy(self):\n \"\"\"\n Return copy of the list\n\n Returns\n -------\n new_list : SparseList\n \"\"\"\n new_splist = SparseList(fill_value=self.fill_value)\n new_splist._chunks = list(self._chunks)\n return new_splist\n\n def to_array(self):\n \"\"\"\n Return SparseArray from data stored in the SparseList\n\n Returns\n -------\n sparr : SparseArray\n \"\"\"\n self.consolidate(inplace=True)\n return self._chunks[0]\n\n def append(self, value):\n \"\"\"\n Append element or array-like chunk of data to the SparseList\n\n Parameters\n ----------\n value: scalar or array-like\n \"\"\"\n if is_scalar(value):\n value = [value]\n\n sparr = SparseArray(value, fill_value=self.fill_value)\n self._chunks.append(sparr)\n self._consolidated = False\n\n\ndef _concat_sparse_indexes(indexes):\n all_indices = []\n total_length = 0\n\n for index in 
indexes:\n # increment by offset\n inds = index.to_int_index().indices + total_length\n\n all_indices.append(inds)\n total_length += index.length\n\n return splib.IntIndex(total_length, np.concatenate(all_indices))\n", "path": "pandas/sparse/list.py"}]}
| 1,450 | 164 |
gh_patches_debug_12633
|
rasdani/github-patches
|
git_diff
|
liberapay__liberapay.com-2327
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] Always put your primary email address at the top
I first added the "@qq.com" email address, and then added the "@protonmail.com" email address and set it as the new primary email address.
But it was ranked second. Obviously, this order is in chronological order of adding. But the primary email address is already the "primary" address, so why shouldn't it be ranked first?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/payin/paypal.py`
Content:
```
1 from datetime import timedelta
2 import logging
3 import re
4 from time import sleep
5
6 import requests
7 from pando.utils import utcnow
8
9 from ..exceptions import PaymentError
10 from ..i18n.currencies import Money
11 from ..website import website
12 from .common import (
13 abort_payin, update_payin, update_payin_transfer, record_payin_refund,
14 record_payin_transfer_reversal,
15 )
16
17
18 logger = logging.getLogger('paypal')
19
20 session = requests.Session()
21
22
23 def _extract_error_message(response):
24 try:
25 error = response.json()
26 message = error['message']
27 assert message
28 details = error.get('details')
29 if details and isinstance(details, list):
30 message = ' | '.join(
31 ('%(issue)s: %(description)s' % d if d.get('issue') else d['description'])
32 for d in details if d.get('description')
33 ) or message
34 debug_id = error.get('debug_id')
35 if debug_id:
36 message += " | PayPal debug_id: " + debug_id
37 return message
38 except Exception:
39 error = response.text # for Sentry
40 logger.debug(error)
41 raise PaymentError('PayPal')
42
43
44 def _init_session():
45 # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret
46 if 'Authentication' in session.headers:
47 return session
48 from base64 import b64encode
49 session.headers.update({
50 'Authorization': 'Basic ' + b64encode((
51 '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret)
52 ).encode('ascii')).decode('ascii'),
53 })
54 return session
55
56
57 # Version 2
58 # =========
59
60 CAPTURE_STATUSES_MAP = {
61 'COMPLETED': 'succeeded',
62 'DECLINED': 'failed',
63 'PARTIALLY_REFUNDED': 'succeeded',
64 'PENDING': 'pending',
65 'REFUNDED': 'succeeded',
66 }
67 ORDER_STATUSES_MAP = {
68 'APPROVED': 'pending',
69 'COMPLETED': 'succeeded',
70 'CREATED': 'awaiting_payer_action',
71 'SAVED': 'pending',
72 'VOIDED': 'failed',
73 }
74 REFUND_STATUSES_MAP = {
75 'CANCELLED': 'failed',
76 'COMPLETED': 'succeeded',
77 'FAILED': 'failed',
78 'PENDING': 'pending',
79 }
80
81 locale_re = re.compile("^[a-z]{2}(?:-[A-Z][a-z]{3})?(?:-(?:[A-Z]{2}))?$")
82
83
84 def create_order(db, payin, payer, return_url, cancel_url, state):
85 """Create an Order.
86
87 Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_create
88
89 Note: even though the API expects a list of purchase_units it rejects the
90 request if the list contains more than one of them.
91 """
92 transfers = db.all("""
93 SELECT pt.*
94 , recipient.username AS recipient_username
95 , team.username AS team_name
96 , a.id AS merchant_id
97 FROM payin_transfers pt
98 JOIN participants recipient ON recipient.id = pt.recipient
99 LEFT JOIN participants team ON team.id = pt.team
100 JOIN payment_accounts a ON a.pk = pt.destination
101 WHERE pt.payin = %s
102 ORDER BY pt.id
103 """, (payin.id,))
104 assert transfers
105 locale, _, ngettext = state['locale'], state['_'], state['ngettext']
106 # PayPal processes BCP47 tags in a case-sensitive way, and completely rejects
107 # requests containing "improperly" cased values.
108 locale_tag = (
109 locale.language +
110 (f'-{locale.script}' if locale.script else '') +
111 (f'-{locale.territory}' if locale.territory else '')
112 )
113 if not locale_re.match(locale_tag):
114 website.tell_sentry(Warning(
115 f"the locale tag `{locale_tag}` doesn't match the format expected by PayPal; "
116 f"falling back to `{locale.language}`"
117 ))
118 locale_tag = locale.language
119 data = {
120 "intent": "CAPTURE",
121 "application_context": {
122 "brand_name": "Liberapay",
123 "cancel_url": cancel_url,
124 "locale": locale_tag,
125 "landing_page": "BILLING",
126 "shipping_preference": "NO_SHIPPING",
127 "user_action": "PAY_NOW",
128 "return_url": return_url,
129 },
130 "purchase_units": [{
131 "amount": {
132 "value": str(pt.amount.amount),
133 "currency_code": pt.amount.currency
134 },
135 "custom_id": str(pt.id),
136 "description": (
137 _("Liberapay donation to {username} (team {team_name})",
138 username=pt.recipient_username, team_name=pt.team_name)
139 if pt.team_name else
140 _("Liberapay donation to {username}", username=pt.recipient_username)
141 ) + ' | ' + (ngettext(
142 "{n} week of {money_amount}",
143 "{n} weeks of {money_amount}",
144 n=pt.n_units, money_amount=pt.unit_amount
145 ) if pt.period == 'weekly' else ngettext(
146 "{n} month of {money_amount}",
147 "{n} months of {money_amount}",
148 n=pt.n_units, money_amount=pt.unit_amount
149 ) if pt.period == 'monthly' else ngettext(
150 "{n} year of {money_amount}",
151 "{n} years of {money_amount}",
152 n=pt.n_units, money_amount=pt.unit_amount
153 )),
154 "payee": {
155 "email_address": pt.merchant_id,
156 },
157 "reference_id": str(pt.id),
158 "soft_descriptor": "Liberapay",
159 } for pt in transfers],
160 }
161 url = 'https://api.%s/v2/checkout/orders' % website.app_conf.paypal_domain
162 headers = {
163 'PayPal-Request-Id': 'payin_%i' % payin.id
164 }
165 response = _init_session().post(url, json=data, headers=headers)
166 if response.status_code not in (200, 201):
167 error = _extract_error_message(response)
168 return abort_payin(db, payin, error)
169 order = response.json()
170 status = ORDER_STATUSES_MAP[order['status']]
171 error = order['status'] if status == 'failed' else None
172 payin = update_payin(db, payin.id, order['id'], status, error)
173 if payin.status == 'awaiting_payer_action':
174 redirect_url = [l['href'] for l in order['links'] if l['rel'] == 'approve'][0]
175 raise state['response'].redirect(redirect_url)
176 return payin
177
178
179 def capture_order(db, payin):
180 """Capture a previously approved payment for an order.
181
182 Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_capture
183 """
184 url = 'https://api.%s/v2/checkout/orders/%s/capture' % (
185 website.app_conf.paypal_domain, payin.remote_id
186 )
187 headers = {
188 'PayPal-Request-Id': 'capture_order_%i' % payin.id,
189 'Prefer': 'return=representation',
190 }
191 response = _init_session().post(url, json={}, headers=headers)
192 if response.status_code not in (200, 201):
193 error = _extract_error_message(response)
194 return abort_payin(db, payin, error)
195 order = response.json()
196 return record_order_result(db, payin, order)
197
198
199 def record_order_result(db, payin, order):
200 """Update the status of a payin and its transfers in our database.
201 """
202 # Update the payin
203 status = ORDER_STATUSES_MAP[order['status']]
204 if status == 'awaiting_payer_action' and payin.status == 'failed':
205 # This payin has already been aborted, don't reset it.
206 return payin
207 error = order['status'] if status == 'failed' else None
208 refunded_amount = sum(
209 sum(
210 Money(refund['amount']['value'], refund['amount']['currency_code'])
211 for refund in pu.get('payments', {}).get('refunds', ())
212 )
213 for pu in order['purchase_units']
214 ) or None
215 payin = update_payin(
216 db, payin.id, order['id'], status, error, refunded_amount=refunded_amount
217 )
218
219 # Update the payin transfers
220 for pu in order['purchase_units']:
221 pt_id = pu['reference_id']
222 reversed_amount = payin.amount.zero()
223 for refund in pu.get('payments', {}).get('refunds', ()):
224 refund_amount = refund['amount']
225 refund_amount = Money(refund_amount['value'], refund_amount['currency_code'])
226 reversed_amount += refund_amount
227 refund_description = refund['note_to_payer']
228 refund_status = REFUND_STATUSES_MAP[refund['status']]
229 refund_error = refund.get('status_details', {}).get('reason')
230 payin_refund = record_payin_refund(
231 db, payin.id, refund['id'], refund_amount, None, refund_description,
232 refund_status, refund_error, refund['create_time'], notify=False,
233
234 )
235 record_payin_transfer_reversal(
236 db, pt_id, refund['id'], payin_refund.id, refund['create_time']
237 )
238 if reversed_amount == 0:
239 reversed_amount = None
240 for capture in pu.get('payments', {}).get('captures', ()):
241 pt_remote_id = capture['id']
242 pt_status = CAPTURE_STATUSES_MAP[capture['status']]
243 pt_error = capture.get('status_details', {}).get('reason')
244 breakdown = capture.get('seller_receivable_breakdown')
245 if breakdown and breakdown.get('paypal_fee'):
246 pt_fee = breakdown['paypal_fee']
247 pt_fee = Money(pt_fee['value'], pt_fee['currency_code'])
248 net_amount = breakdown['net_amount']
249 else:
250 pt_fee = None
251 net_amount = capture['amount']
252 net_amount = Money(net_amount['value'], net_amount['currency_code'])
253 update_payin_transfer(
254 db, pt_id, pt_remote_id, pt_status, pt_error,
255 amount=net_amount, fee=pt_fee, reversed_amount=reversed_amount
256 )
257
258 return payin
259
260
261 def sync_order(db, payin):
262 """Fetch the order's data and update our database.
263
264 Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_get
265 """
266 url = 'https://api.%s/v2/checkout/orders/%s' % (
267 website.app_conf.paypal_domain, payin.remote_id
268 )
269 response = _init_session().get(url)
270 if response.status_code != 200:
271 if payin.status == 'failed':
272 return payin
273 try:
274 error = response.json()
275 except Exception:
276 error = {}
277 expired = response.status_code == 404 and (
278 error.get('message') == "The specified resource does not exist." or
279 payin.ctime < (utcnow() - timedelta(days=30))
280 )
281 if expired:
282 return abort_payin(db, payin, "abandoned by payer")
283 error = response.text # for Sentry
284 logger.debug(error)
285 raise PaymentError('PayPal')
286 order = response.json()
287 return record_order_result(db, payin, order)
288
289
290 def sync_all_pending_payments(db=None):
291 """Calls `sync_order` for every pending payment.
292 """
293 db = db or website.db
294 payins = db.all("""
295 SELECT DISTINCT ON (pi.id) pi.*
296 FROM payin_transfers pt
297 JOIN payins pi ON pi.id = pt.payin
298 JOIN exchange_routes r ON r.id = pi.route
299 WHERE pt.status = 'pending'
300 AND r.network = 'paypal'
301 ORDER BY pi.id
302 """)
303 print("Syncing %i pending PayPal payments..." % len(payins))
304 for payin in payins:
305 try:
306 sync_order(db, payin)
307 except Exception as e:
308 website.tell_sentry(e)
309 sleep(0.2)
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/liberapay/payin/paypal.py b/liberapay/payin/paypal.py
--- a/liberapay/payin/paypal.py
+++ b/liberapay/payin/paypal.py
@@ -224,7 +224,7 @@
refund_amount = refund['amount']
refund_amount = Money(refund_amount['value'], refund_amount['currency_code'])
reversed_amount += refund_amount
- refund_description = refund['note_to_payer']
+ refund_description = refund.get('note_to_payer')
refund_status = REFUND_STATUSES_MAP[refund['status']]
refund_error = refund.get('status_details', {}).get('reason')
payin_refund = record_payin_refund(
|
{"golden_diff": "diff --git a/liberapay/payin/paypal.py b/liberapay/payin/paypal.py\n--- a/liberapay/payin/paypal.py\n+++ b/liberapay/payin/paypal.py\n@@ -224,7 +224,7 @@\n refund_amount = refund['amount']\n refund_amount = Money(refund_amount['value'], refund_amount['currency_code'])\n reversed_amount += refund_amount\n- refund_description = refund['note_to_payer']\n+ refund_description = refund.get('note_to_payer')\n refund_status = REFUND_STATUSES_MAP[refund['status']]\n refund_error = refund.get('status_details', {}).get('reason')\n payin_refund = record_payin_refund(\n", "issue": "[Feature Request] Always put your primary email address at the top\nI first added the \"@qq.com\" email address, and then added the \"@protonmail.com\" email address and set it as the new primary email address.\r\nBut it was ranked second. Obviously, this order is in chronological order of adding. But the primary email address is already the \"primary\" address, so why shouldn't it be ranked first?\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import timedelta\nimport logging\nimport re\nfrom time import sleep\n\nimport requests\nfrom pando.utils import utcnow\n\nfrom ..exceptions import PaymentError\nfrom ..i18n.currencies import Money\nfrom ..website import website\nfrom .common import (\n abort_payin, update_payin, update_payin_transfer, record_payin_refund,\n record_payin_transfer_reversal,\n)\n\n\nlogger = logging.getLogger('paypal')\n\nsession = requests.Session()\n\n\ndef _extract_error_message(response):\n try:\n error = response.json()\n message = error['message']\n assert message\n details = error.get('details')\n if details and isinstance(details, list):\n message = ' | '.join(\n ('%(issue)s: %(description)s' % d if d.get('issue') else d['description'])\n for d in details if d.get('description')\n ) or message\n debug_id = error.get('debug_id')\n if debug_id:\n message += \" | PayPal debug_id: \" + debug_id\n return message\n except Exception:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n\n\ndef _init_session():\n # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret\n if 'Authentication' in session.headers:\n return session\n from base64 import b64encode\n session.headers.update({\n 'Authorization': 'Basic ' + b64encode((\n '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret)\n ).encode('ascii')).decode('ascii'),\n })\n return session\n\n\n# Version 2\n# =========\n\nCAPTURE_STATUSES_MAP = {\n 'COMPLETED': 'succeeded',\n 'DECLINED': 'failed',\n 'PARTIALLY_REFUNDED': 'succeeded',\n 'PENDING': 'pending',\n 'REFUNDED': 'succeeded',\n}\nORDER_STATUSES_MAP = {\n 'APPROVED': 'pending',\n 'COMPLETED': 'succeeded',\n 'CREATED': 'awaiting_payer_action',\n 'SAVED': 'pending',\n 'VOIDED': 'failed',\n}\nREFUND_STATUSES_MAP = {\n 'CANCELLED': 'failed',\n 'COMPLETED': 'succeeded',\n 'FAILED': 'failed',\n 'PENDING': 'pending',\n}\n\nlocale_re = re.compile(\"^[a-z]{2}(?:-[A-Z][a-z]{3})?(?:-(?:[A-Z]{2}))?$\")\n\n\ndef create_order(db, payin, payer, return_url, cancel_url, state):\n \"\"\"Create an Order.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_create\n\n Note: even though the API expects a list of purchase_units it rejects the\n request if the list contains more than one of them.\n \"\"\"\n transfers = db.all(\"\"\"\n SELECT pt.*\n , recipient.username AS recipient_username\n , team.username AS team_name\n , a.id AS merchant_id\n FROM payin_transfers pt\n JOIN participants recipient 
ON recipient.id = pt.recipient\n LEFT JOIN participants team ON team.id = pt.team\n JOIN payment_accounts a ON a.pk = pt.destination\n WHERE pt.payin = %s\n ORDER BY pt.id\n \"\"\", (payin.id,))\n assert transfers\n locale, _, ngettext = state['locale'], state['_'], state['ngettext']\n # PayPal processes BCP47 tags in a case-sensitive way, and completely rejects\n # requests containing \"improperly\" cased values.\n locale_tag = (\n locale.language +\n (f'-{locale.script}' if locale.script else '') +\n (f'-{locale.territory}' if locale.territory else '')\n )\n if not locale_re.match(locale_tag):\n website.tell_sentry(Warning(\n f\"the locale tag `{locale_tag}` doesn't match the format expected by PayPal; \"\n f\"falling back to `{locale.language}`\"\n ))\n locale_tag = locale.language\n data = {\n \"intent\": \"CAPTURE\",\n \"application_context\": {\n \"brand_name\": \"Liberapay\",\n \"cancel_url\": cancel_url,\n \"locale\": locale_tag,\n \"landing_page\": \"BILLING\",\n \"shipping_preference\": \"NO_SHIPPING\",\n \"user_action\": \"PAY_NOW\",\n \"return_url\": return_url,\n },\n \"purchase_units\": [{\n \"amount\": {\n \"value\": str(pt.amount.amount),\n \"currency_code\": pt.amount.currency\n },\n \"custom_id\": str(pt.id),\n \"description\": (\n _(\"Liberapay donation to {username} (team {team_name})\",\n username=pt.recipient_username, team_name=pt.team_name)\n if pt.team_name else\n _(\"Liberapay donation to {username}\", username=pt.recipient_username)\n ) + ' | ' + (ngettext(\n \"{n} week of {money_amount}\",\n \"{n} weeks of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n ) if pt.period == 'weekly' else ngettext(\n \"{n} month of {money_amount}\",\n \"{n} months of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n ) if pt.period == 'monthly' else ngettext(\n \"{n} year of {money_amount}\",\n \"{n} years of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n )),\n \"payee\": {\n \"email_address\": pt.merchant_id,\n },\n \"reference_id\": str(pt.id),\n \"soft_descriptor\": \"Liberapay\",\n } for pt in transfers],\n }\n url = 'https://api.%s/v2/checkout/orders' % website.app_conf.paypal_domain\n headers = {\n 'PayPal-Request-Id': 'payin_%i' % payin.id\n }\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code not in (200, 201):\n error = _extract_error_message(response)\n return abort_payin(db, payin, error)\n order = response.json()\n status = ORDER_STATUSES_MAP[order['status']]\n error = order['status'] if status == 'failed' else None\n payin = update_payin(db, payin.id, order['id'], status, error)\n if payin.status == 'awaiting_payer_action':\n redirect_url = [l['href'] for l in order['links'] if l['rel'] == 'approve'][0]\n raise state['response'].redirect(redirect_url)\n return payin\n\n\ndef capture_order(db, payin):\n \"\"\"Capture a previously approved payment for an order.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_capture\n \"\"\"\n url = 'https://api.%s/v2/checkout/orders/%s/capture' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n headers = {\n 'PayPal-Request-Id': 'capture_order_%i' % payin.id,\n 'Prefer': 'return=representation',\n }\n response = _init_session().post(url, json={}, headers=headers)\n if response.status_code not in (200, 201):\n error = _extract_error_message(response)\n return abort_payin(db, payin, error)\n order = response.json()\n return record_order_result(db, payin, order)\n\n\ndef record_order_result(db, payin, order):\n \"\"\"Update the 
status of a payin and its transfers in our database.\n \"\"\"\n # Update the payin\n status = ORDER_STATUSES_MAP[order['status']]\n if status == 'awaiting_payer_action' and payin.status == 'failed':\n # This payin has already been aborted, don't reset it.\n return payin\n error = order['status'] if status == 'failed' else None\n refunded_amount = sum(\n sum(\n Money(refund['amount']['value'], refund['amount']['currency_code'])\n for refund in pu.get('payments', {}).get('refunds', ())\n )\n for pu in order['purchase_units']\n ) or None\n payin = update_payin(\n db, payin.id, order['id'], status, error, refunded_amount=refunded_amount\n )\n\n # Update the payin transfers\n for pu in order['purchase_units']:\n pt_id = pu['reference_id']\n reversed_amount = payin.amount.zero()\n for refund in pu.get('payments', {}).get('refunds', ()):\n refund_amount = refund['amount']\n refund_amount = Money(refund_amount['value'], refund_amount['currency_code'])\n reversed_amount += refund_amount\n refund_description = refund['note_to_payer']\n refund_status = REFUND_STATUSES_MAP[refund['status']]\n refund_error = refund.get('status_details', {}).get('reason')\n payin_refund = record_payin_refund(\n db, payin.id, refund['id'], refund_amount, None, refund_description,\n refund_status, refund_error, refund['create_time'], notify=False,\n\n )\n record_payin_transfer_reversal(\n db, pt_id, refund['id'], payin_refund.id, refund['create_time']\n )\n if reversed_amount == 0:\n reversed_amount = None\n for capture in pu.get('payments', {}).get('captures', ()):\n pt_remote_id = capture['id']\n pt_status = CAPTURE_STATUSES_MAP[capture['status']]\n pt_error = capture.get('status_details', {}).get('reason')\n breakdown = capture.get('seller_receivable_breakdown')\n if breakdown and breakdown.get('paypal_fee'):\n pt_fee = breakdown['paypal_fee']\n pt_fee = Money(pt_fee['value'], pt_fee['currency_code'])\n net_amount = breakdown['net_amount']\n else:\n pt_fee = None\n net_amount = capture['amount']\n net_amount = Money(net_amount['value'], net_amount['currency_code'])\n update_payin_transfer(\n db, pt_id, pt_remote_id, pt_status, pt_error,\n amount=net_amount, fee=pt_fee, reversed_amount=reversed_amount\n )\n\n return payin\n\n\ndef sync_order(db, payin):\n \"\"\"Fetch the order's data and update our database.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_get\n \"\"\"\n url = 'https://api.%s/v2/checkout/orders/%s' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n response = _init_session().get(url)\n if response.status_code != 200:\n if payin.status == 'failed':\n return payin\n try:\n error = response.json()\n except Exception:\n error = {}\n expired = response.status_code == 404 and (\n error.get('message') == \"The specified resource does not exist.\" or\n payin.ctime < (utcnow() - timedelta(days=30))\n )\n if expired:\n return abort_payin(db, payin, \"abandoned by payer\")\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n order = response.json()\n return record_order_result(db, payin, order)\n\n\ndef sync_all_pending_payments(db=None):\n \"\"\"Calls `sync_order` for every pending payment.\n \"\"\"\n db = db or website.db\n payins = db.all(\"\"\"\n SELECT DISTINCT ON (pi.id) pi.*\n FROM payin_transfers pt\n JOIN payins pi ON pi.id = pt.payin\n JOIN exchange_routes r ON r.id = pi.route\n WHERE pt.status = 'pending'\n AND r.network = 'paypal'\n ORDER BY pi.id\n \"\"\")\n print(\"Syncing %i pending PayPal payments...\" % len(payins))\n for payin in 
payins:\n try:\n sync_order(db, payin)\n except Exception as e:\n website.tell_sentry(e)\n sleep(0.2)\n", "path": "liberapay/payin/paypal.py"}], "after_files": [{"content": "from datetime import timedelta\nimport logging\nimport re\nfrom time import sleep\n\nimport requests\nfrom pando.utils import utcnow\n\nfrom ..exceptions import PaymentError\nfrom ..i18n.currencies import Money\nfrom ..website import website\nfrom .common import (\n abort_payin, update_payin, update_payin_transfer, record_payin_refund,\n record_payin_transfer_reversal,\n)\n\n\nlogger = logging.getLogger('paypal')\n\nsession = requests.Session()\n\n\ndef _extract_error_message(response):\n try:\n error = response.json()\n message = error['message']\n assert message\n details = error.get('details')\n if details and isinstance(details, list):\n message = ' | '.join(\n ('%(issue)s: %(description)s' % d if d.get('issue') else d['description'])\n for d in details if d.get('description')\n ) or message\n debug_id = error.get('debug_id')\n if debug_id:\n message += \" | PayPal debug_id: \" + debug_id\n return message\n except Exception:\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n\n\ndef _init_session():\n # TODO switch to bearer tokens to reduce the risk of exposing the long-lived secret\n if 'Authentication' in session.headers:\n return session\n from base64 import b64encode\n session.headers.update({\n 'Authorization': 'Basic ' + b64encode((\n '%s:%s' % (website.app_conf.paypal_id, website.app_conf.paypal_secret)\n ).encode('ascii')).decode('ascii'),\n })\n return session\n\n\n# Version 2\n# =========\n\nCAPTURE_STATUSES_MAP = {\n 'COMPLETED': 'succeeded',\n 'DECLINED': 'failed',\n 'PARTIALLY_REFUNDED': 'succeeded',\n 'PENDING': 'pending',\n 'REFUNDED': 'succeeded',\n}\nORDER_STATUSES_MAP = {\n 'APPROVED': 'pending',\n 'COMPLETED': 'succeeded',\n 'CREATED': 'awaiting_payer_action',\n 'SAVED': 'pending',\n 'VOIDED': 'failed',\n}\nREFUND_STATUSES_MAP = {\n 'CANCELLED': 'failed',\n 'COMPLETED': 'succeeded',\n 'FAILED': 'failed',\n 'PENDING': 'pending',\n}\n\nlocale_re = re.compile(\"^[a-z]{2}(?:-[A-Z][a-z]{3})?(?:-(?:[A-Z]{2}))?$\")\n\n\ndef create_order(db, payin, payer, return_url, cancel_url, state):\n \"\"\"Create an Order.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_create\n\n Note: even though the API expects a list of purchase_units it rejects the\n request if the list contains more than one of them.\n \"\"\"\n transfers = db.all(\"\"\"\n SELECT pt.*\n , recipient.username AS recipient_username\n , team.username AS team_name\n , a.id AS merchant_id\n FROM payin_transfers pt\n JOIN participants recipient ON recipient.id = pt.recipient\n LEFT JOIN participants team ON team.id = pt.team\n JOIN payment_accounts a ON a.pk = pt.destination\n WHERE pt.payin = %s\n ORDER BY pt.id\n \"\"\", (payin.id,))\n assert transfers\n locale, _, ngettext = state['locale'], state['_'], state['ngettext']\n # PayPal processes BCP47 tags in a case-sensitive way, and completely rejects\n # requests containing \"improperly\" cased values.\n locale_tag = (\n locale.language +\n (f'-{locale.script}' if locale.script else '') +\n (f'-{locale.territory}' if locale.territory else '')\n )\n if not locale_re.match(locale_tag):\n website.tell_sentry(Warning(\n f\"the locale tag `{locale_tag}` doesn't match the format expected by PayPal; \"\n f\"falling back to `{locale.language}`\"\n ))\n locale_tag = locale.language\n data = {\n \"intent\": \"CAPTURE\",\n \"application_context\": {\n 
\"brand_name\": \"Liberapay\",\n \"cancel_url\": cancel_url,\n \"locale\": locale_tag,\n \"landing_page\": \"BILLING\",\n \"shipping_preference\": \"NO_SHIPPING\",\n \"user_action\": \"PAY_NOW\",\n \"return_url\": return_url,\n },\n \"purchase_units\": [{\n \"amount\": {\n \"value\": str(pt.amount.amount),\n \"currency_code\": pt.amount.currency\n },\n \"custom_id\": str(pt.id),\n \"description\": (\n _(\"Liberapay donation to {username} (team {team_name})\",\n username=pt.recipient_username, team_name=pt.team_name)\n if pt.team_name else\n _(\"Liberapay donation to {username}\", username=pt.recipient_username)\n ) + ' | ' + (ngettext(\n \"{n} week of {money_amount}\",\n \"{n} weeks of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n ) if pt.period == 'weekly' else ngettext(\n \"{n} month of {money_amount}\",\n \"{n} months of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n ) if pt.period == 'monthly' else ngettext(\n \"{n} year of {money_amount}\",\n \"{n} years of {money_amount}\",\n n=pt.n_units, money_amount=pt.unit_amount\n )),\n \"payee\": {\n \"email_address\": pt.merchant_id,\n },\n \"reference_id\": str(pt.id),\n \"soft_descriptor\": \"Liberapay\",\n } for pt in transfers],\n }\n url = 'https://api.%s/v2/checkout/orders' % website.app_conf.paypal_domain\n headers = {\n 'PayPal-Request-Id': 'payin_%i' % payin.id\n }\n response = _init_session().post(url, json=data, headers=headers)\n if response.status_code not in (200, 201):\n error = _extract_error_message(response)\n return abort_payin(db, payin, error)\n order = response.json()\n status = ORDER_STATUSES_MAP[order['status']]\n error = order['status'] if status == 'failed' else None\n payin = update_payin(db, payin.id, order['id'], status, error)\n if payin.status == 'awaiting_payer_action':\n redirect_url = [l['href'] for l in order['links'] if l['rel'] == 'approve'][0]\n raise state['response'].redirect(redirect_url)\n return payin\n\n\ndef capture_order(db, payin):\n \"\"\"Capture a previously approved payment for an order.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_capture\n \"\"\"\n url = 'https://api.%s/v2/checkout/orders/%s/capture' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n headers = {\n 'PayPal-Request-Id': 'capture_order_%i' % payin.id,\n 'Prefer': 'return=representation',\n }\n response = _init_session().post(url, json={}, headers=headers)\n if response.status_code not in (200, 201):\n error = _extract_error_message(response)\n return abort_payin(db, payin, error)\n order = response.json()\n return record_order_result(db, payin, order)\n\n\ndef record_order_result(db, payin, order):\n \"\"\"Update the status of a payin and its transfers in our database.\n \"\"\"\n # Update the payin\n status = ORDER_STATUSES_MAP[order['status']]\n if status == 'awaiting_payer_action' and payin.status == 'failed':\n # This payin has already been aborted, don't reset it.\n return payin\n error = order['status'] if status == 'failed' else None\n refunded_amount = sum(\n sum(\n Money(refund['amount']['value'], refund['amount']['currency_code'])\n for refund in pu.get('payments', {}).get('refunds', ())\n )\n for pu in order['purchase_units']\n ) or None\n payin = update_payin(\n db, payin.id, order['id'], status, error, refunded_amount=refunded_amount\n )\n\n # Update the payin transfers\n for pu in order['purchase_units']:\n pt_id = pu['reference_id']\n reversed_amount = payin.amount.zero()\n for refund in pu.get('payments', {}).get('refunds', ()):\n refund_amount = 
refund['amount']\n refund_amount = Money(refund_amount['value'], refund_amount['currency_code'])\n reversed_amount += refund_amount\n refund_description = refund.get('note_to_payer')\n refund_status = REFUND_STATUSES_MAP[refund['status']]\n refund_error = refund.get('status_details', {}).get('reason')\n payin_refund = record_payin_refund(\n db, payin.id, refund['id'], refund_amount, None, refund_description,\n refund_status, refund_error, refund['create_time'], notify=False,\n\n )\n record_payin_transfer_reversal(\n db, pt_id, refund['id'], payin_refund.id, refund['create_time']\n )\n if reversed_amount == 0:\n reversed_amount = None\n for capture in pu.get('payments', {}).get('captures', ()):\n pt_remote_id = capture['id']\n pt_status = CAPTURE_STATUSES_MAP[capture['status']]\n pt_error = capture.get('status_details', {}).get('reason')\n breakdown = capture.get('seller_receivable_breakdown')\n if breakdown and breakdown.get('paypal_fee'):\n pt_fee = breakdown['paypal_fee']\n pt_fee = Money(pt_fee['value'], pt_fee['currency_code'])\n net_amount = breakdown['net_amount']\n else:\n pt_fee = None\n net_amount = capture['amount']\n net_amount = Money(net_amount['value'], net_amount['currency_code'])\n update_payin_transfer(\n db, pt_id, pt_remote_id, pt_status, pt_error,\n amount=net_amount, fee=pt_fee, reversed_amount=reversed_amount\n )\n\n return payin\n\n\ndef sync_order(db, payin):\n \"\"\"Fetch the order's data and update our database.\n\n Doc: https://developer.paypal.com/docs/api/orders/v2/#orders_get\n \"\"\"\n url = 'https://api.%s/v2/checkout/orders/%s' % (\n website.app_conf.paypal_domain, payin.remote_id\n )\n response = _init_session().get(url)\n if response.status_code != 200:\n if payin.status == 'failed':\n return payin\n try:\n error = response.json()\n except Exception:\n error = {}\n expired = response.status_code == 404 and (\n error.get('message') == \"The specified resource does not exist.\" or\n payin.ctime < (utcnow() - timedelta(days=30))\n )\n if expired:\n return abort_payin(db, payin, \"abandoned by payer\")\n error = response.text # for Sentry\n logger.debug(error)\n raise PaymentError('PayPal')\n order = response.json()\n return record_order_result(db, payin, order)\n\n\ndef sync_all_pending_payments(db=None):\n \"\"\"Calls `sync_order` for every pending payment.\n \"\"\"\n db = db or website.db\n payins = db.all(\"\"\"\n SELECT DISTINCT ON (pi.id) pi.*\n FROM payin_transfers pt\n JOIN payins pi ON pi.id = pt.payin\n JOIN exchange_routes r ON r.id = pi.route\n WHERE pt.status = 'pending'\n AND r.network = 'paypal'\n ORDER BY pi.id\n \"\"\")\n print(\"Syncing %i pending PayPal payments...\" % len(payins))\n for payin in payins:\n try:\n sync_order(db, payin)\n except Exception as e:\n website.tell_sentry(e)\n sleep(0.2)\n", "path": "liberapay/payin/paypal.py"}]}
| 3,900 | 160 |
gh_patches_debug_8799
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-9612
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Last Active Organization not set in API
Currently, a user's last active organization is set only in the Django code: https://github.com/getsentry/sentry/blob/master/src/sentry/web/frontend/base.py#L34
This means that last active organization is not set when a user navigates to a view via a front-end route.
As more of Sentry's views are converted to React, we will lose accurate functionality around a user's last active organization.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/api/bases/organization.py`
Content:
```
1 from __future__ import absolute_import
2
3 from sentry.api.base import Endpoint, logger
4 from sentry.api.exceptions import ResourceDoesNotExist, SsoRequired, TwoFactorRequired
5 from sentry.api.permissions import ScopedPermission
6 from sentry.app import raven
7 from sentry.auth import access
8 from sentry.auth.superuser import is_active_superuser
9 from sentry.models import (
10 ApiKey, Authenticator, Organization, OrganizationMemberTeam, Project, ProjectTeam, ReleaseProject, Team
11 )
12 from sentry.utils import auth
13
14
15 class OrganizationPermission(ScopedPermission):
16 scope_map = {
17 'GET': ['org:read', 'org:write', 'org:admin'],
18 'POST': ['org:write', 'org:admin'],
19 'PUT': ['org:write', 'org:admin'],
20 'DELETE': ['org:admin'],
21 }
22
23 def is_not_2fa_compliant(self, user, organization):
24 return organization.flags.require_2fa and not Authenticator.objects.user_has_2fa(user)
25
26 def needs_sso(self, request, organization):
27 # XXX(dcramer): this is very similar to the server-rendered views
28 # logic for checking valid SSO
29 if not request.access.requires_sso:
30 return False
31 if not auth.has_completed_sso(request, organization.id):
32 return True
33 if not request.access.sso_is_valid:
34 return True
35 return False
36
37 def has_object_permission(self, request, view, organization):
38 if request.user and request.user.is_authenticated() and request.auth:
39 request.access = access.from_request(
40 request,
41 organization,
42 scopes=request.auth.get_scopes(),
43 )
44
45 elif request.auth:
46 if request.auth.organization_id == organization.id:
47 request.access = access.from_auth(request.auth)
48 else:
49 request.access = access.DEFAULT
50
51 else:
52 request.access = access.from_request(request, organization)
53
54 if auth.is_user_signed_request(request):
55 # if the user comes from a signed request
56 # we let them pass if sso is enabled
57 logger.info(
58 'access.signed-sso-passthrough',
59 extra={
60 'organization_id': organization.id,
61 'user_id': request.user.id,
62 }
63 )
64 elif request.user.is_authenticated():
65 # session auth needs to confirm various permissions
66 if self.needs_sso(request, organization):
67
68 logger.info(
69 'access.must-sso',
70 extra={
71 'organization_id': organization.id,
72 'user_id': request.user.id,
73 }
74 )
75
76 raise SsoRequired(organization)
77
78 if self.is_not_2fa_compliant(
79 request.user, organization):
80 logger.info(
81 'access.not-2fa-compliant',
82 extra={
83 'organization_id': organization.id,
84 'user_id': request.user.id,
85 }
86 )
87 raise TwoFactorRequired()
88
89 allowed_scopes = set(self.scope_map.get(request.method, []))
90 return any(request.access.has_scope(s) for s in allowed_scopes)
91
92
93 # These are based on ProjectReleasePermission
94 # additional checks to limit actions to releases
95 # associated with projects people have access to
96 class OrganizationReleasePermission(OrganizationPermission):
97 scope_map = {
98 'GET': ['project:read', 'project:write', 'project:admin', 'project:releases'],
99 'POST': ['project:write', 'project:admin', 'project:releases'],
100 'PUT': ['project:write', 'project:admin', 'project:releases'],
101 'DELETE': ['project:admin', 'project:releases'],
102 }
103
104
105 class OrganizationIntegrationsPermission(OrganizationPermission):
106 scope_map = {
107 'GET': ['org:read', 'org:write', 'org:admin', 'org:integrations'],
108 'POST': ['org:write', 'org:admin', 'org:integrations'],
109 'PUT': ['org:write', 'org:admin', 'org:integrations'],
110 'DELETE': ['org:admin', 'org:integrations'],
111 }
112
113
114 class OrganizationAdminPermission(OrganizationPermission):
115 scope_map = {
116 'GET': ['org:admin'],
117 'POST': ['org:admin'],
118 'PUT': ['org:admin'],
119 'DELETE': ['org:admin'],
120 }
121
122
123 class OrganizationAuthProviderPermission(OrganizationPermission):
124 scope_map = {
125 'GET': ['org:read'],
126 'POST': ['org:admin'],
127 'PUT': ['org:admin'],
128 'DELETE': ['org:admin'],
129 }
130
131
132 class OrganizationEndpoint(Endpoint):
133 permission_classes = (OrganizationPermission, )
134
135 def convert_args(self, request, organization_slug, *args, **kwargs):
136 try:
137 organization = Organization.objects.get_from_cache(
138 slug=organization_slug,
139 )
140 except Organization.DoesNotExist:
141 raise ResourceDoesNotExist
142
143 self.check_object_permissions(request, organization)
144
145 raven.tags_context({
146 'organization': organization.id,
147 })
148
149 request._request.organization = organization
150
151 kwargs['organization'] = organization
152 return (args, kwargs)
153
154
155 class OrganizationReleasesBaseEndpoint(OrganizationEndpoint):
156 permission_classes = (OrganizationReleasePermission, )
157
158 def get_allowed_projects(self, request, organization):
159 has_valid_api_key = False
160 if isinstance(request.auth, ApiKey):
161 if request.auth.organization_id != organization.id:
162 return []
163 has_valid_api_key = request.auth.has_scope('project:releases') or \
164 request.auth.has_scope('project:write')
165
166 if not (has_valid_api_key or request.user.is_authenticated()):
167 return []
168
169 if has_valid_api_key or is_active_superuser(request) or organization.flags.allow_joinleave:
170 allowed_teams = Team.objects.filter(organization=organization).values_list(
171 'id', flat=True
172 )
173 else:
174 allowed_teams = OrganizationMemberTeam.objects.filter(
175 organizationmember__user=request.user,
176 team__organization_id=organization.id,
177 ).values_list(
178 'team_id', flat=True
179 )
180
181 return Project.objects.filter(
182 id__in=ProjectTeam.objects.filter(
183 team_id__in=allowed_teams,
184 ).values_list('project_id', flat=True)
185 )
186
187 def has_release_permission(self, request, organization, release):
188 return ReleaseProject.objects.filter(
189 release=release,
190 project__in=self.get_allowed_projects(request, organization),
191 ).exists()
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/api/bases/organization.py b/src/sentry/api/bases/organization.py
--- a/src/sentry/api/bases/organization.py
+++ b/src/sentry/api/bases/organization.py
@@ -145,9 +145,13 @@
raven.tags_context({
'organization': organization.id,
})
-
request._request.organization = organization
+ # Track the 'active' organization when the request came from
+ # a cookie based agent (react app)
+ if request.auth is None and request.user:
+ request.session['activeorg'] = organization.slug
+
kwargs['organization'] = organization
return (args, kwargs)
|
{"golden_diff": "diff --git a/src/sentry/api/bases/organization.py b/src/sentry/api/bases/organization.py\n--- a/src/sentry/api/bases/organization.py\n+++ b/src/sentry/api/bases/organization.py\n@@ -145,9 +145,13 @@\n raven.tags_context({\n 'organization': organization.id,\n })\n-\n request._request.organization = organization\n \n+ # Track the 'active' organization when the request came from\n+ # a cookie based agent (react app)\n+ if request.auth is None and request.user:\n+ request.session['activeorg'] = organization.slug\n+\n kwargs['organization'] = organization\n return (args, kwargs)\n", "issue": "Last Active Organization not set in API\nCurrently, a user's last active organization is set only in the Django code: https://github.com/getsentry/sentry/blob/master/src/sentry/web/frontend/base.py#L34\r\n\r\nThis means that last active organization is not set when a user navigates to a view via a front-end route.\r\n\r\nAs more of Sentry's views are converted to React, we will lose accurate functionality around a user's last active organization.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry.api.base import Endpoint, logger\nfrom sentry.api.exceptions import ResourceDoesNotExist, SsoRequired, TwoFactorRequired\nfrom sentry.api.permissions import ScopedPermission\nfrom sentry.app import raven\nfrom sentry.auth import access\nfrom sentry.auth.superuser import is_active_superuser\nfrom sentry.models import (\n ApiKey, Authenticator, Organization, OrganizationMemberTeam, Project, ProjectTeam, ReleaseProject, Team\n)\nfrom sentry.utils import auth\n\n\nclass OrganizationPermission(ScopedPermission):\n scope_map = {\n 'GET': ['org:read', 'org:write', 'org:admin'],\n 'POST': ['org:write', 'org:admin'],\n 'PUT': ['org:write', 'org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n def is_not_2fa_compliant(self, user, organization):\n return organization.flags.require_2fa and not Authenticator.objects.user_has_2fa(user)\n\n def needs_sso(self, request, organization):\n # XXX(dcramer): this is very similar to the server-rendered views\n # logic for checking valid SSO\n if not request.access.requires_sso:\n return False\n if not auth.has_completed_sso(request, organization.id):\n return True\n if not request.access.sso_is_valid:\n return True\n return False\n\n def has_object_permission(self, request, view, organization):\n if request.user and request.user.is_authenticated() and request.auth:\n request.access = access.from_request(\n request,\n organization,\n scopes=request.auth.get_scopes(),\n )\n\n elif request.auth:\n if request.auth.organization_id == organization.id:\n request.access = access.from_auth(request.auth)\n else:\n request.access = access.DEFAULT\n\n else:\n request.access = access.from_request(request, organization)\n\n if auth.is_user_signed_request(request):\n # if the user comes from a signed request\n # we let them pass if sso is enabled\n logger.info(\n 'access.signed-sso-passthrough',\n extra={\n 'organization_id': organization.id,\n 'user_id': request.user.id,\n }\n )\n elif request.user.is_authenticated():\n # session auth needs to confirm various permissions\n if self.needs_sso(request, organization):\n\n logger.info(\n 'access.must-sso',\n extra={\n 'organization_id': organization.id,\n 'user_id': request.user.id,\n }\n )\n\n raise SsoRequired(organization)\n\n if self.is_not_2fa_compliant(\n request.user, organization):\n logger.info(\n 'access.not-2fa-compliant',\n extra={\n 'organization_id': organization.id,\n 'user_id': 
request.user.id,\n }\n )\n raise TwoFactorRequired()\n\n allowed_scopes = set(self.scope_map.get(request.method, []))\n return any(request.access.has_scope(s) for s in allowed_scopes)\n\n\n# These are based on ProjectReleasePermission\n# additional checks to limit actions to releases\n# associated with projects people have access to\nclass OrganizationReleasePermission(OrganizationPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin', 'project:releases'],\n 'POST': ['project:write', 'project:admin', 'project:releases'],\n 'PUT': ['project:write', 'project:admin', 'project:releases'],\n 'DELETE': ['project:admin', 'project:releases'],\n }\n\n\nclass OrganizationIntegrationsPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:read', 'org:write', 'org:admin', 'org:integrations'],\n 'POST': ['org:write', 'org:admin', 'org:integrations'],\n 'PUT': ['org:write', 'org:admin', 'org:integrations'],\n 'DELETE': ['org:admin', 'org:integrations'],\n }\n\n\nclass OrganizationAdminPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:admin'],\n 'POST': ['org:admin'],\n 'PUT': ['org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n\nclass OrganizationAuthProviderPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:read'],\n 'POST': ['org:admin'],\n 'PUT': ['org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n\nclass OrganizationEndpoint(Endpoint):\n permission_classes = (OrganizationPermission, )\n\n def convert_args(self, request, organization_slug, *args, **kwargs):\n try:\n organization = Organization.objects.get_from_cache(\n slug=organization_slug,\n )\n except Organization.DoesNotExist:\n raise ResourceDoesNotExist\n\n self.check_object_permissions(request, organization)\n\n raven.tags_context({\n 'organization': organization.id,\n })\n\n request._request.organization = organization\n\n kwargs['organization'] = organization\n return (args, kwargs)\n\n\nclass OrganizationReleasesBaseEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationReleasePermission, )\n\n def get_allowed_projects(self, request, organization):\n has_valid_api_key = False\n if isinstance(request.auth, ApiKey):\n if request.auth.organization_id != organization.id:\n return []\n has_valid_api_key = request.auth.has_scope('project:releases') or \\\n request.auth.has_scope('project:write')\n\n if not (has_valid_api_key or request.user.is_authenticated()):\n return []\n\n if has_valid_api_key or is_active_superuser(request) or organization.flags.allow_joinleave:\n allowed_teams = Team.objects.filter(organization=organization).values_list(\n 'id', flat=True\n )\n else:\n allowed_teams = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user,\n team__organization_id=organization.id,\n ).values_list(\n 'team_id', flat=True\n )\n\n return Project.objects.filter(\n id__in=ProjectTeam.objects.filter(\n team_id__in=allowed_teams,\n ).values_list('project_id', flat=True)\n )\n\n def has_release_permission(self, request, organization, release):\n return ReleaseProject.objects.filter(\n release=release,\n project__in=self.get_allowed_projects(request, organization),\n ).exists()\n", "path": "src/sentry/api/bases/organization.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry.api.base import Endpoint, logger\nfrom sentry.api.exceptions import ResourceDoesNotExist, SsoRequired, TwoFactorRequired\nfrom sentry.api.permissions import ScopedPermission\nfrom sentry.app import raven\nfrom sentry.auth import 
access\nfrom sentry.auth.superuser import is_active_superuser\nfrom sentry.models import (\n ApiKey, Authenticator, Organization, OrganizationMemberTeam, Project, ProjectTeam, ReleaseProject, Team\n)\nfrom sentry.utils import auth\n\n\nclass OrganizationPermission(ScopedPermission):\n scope_map = {\n 'GET': ['org:read', 'org:write', 'org:admin'],\n 'POST': ['org:write', 'org:admin'],\n 'PUT': ['org:write', 'org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n def is_not_2fa_compliant(self, user, organization):\n return organization.flags.require_2fa and not Authenticator.objects.user_has_2fa(user)\n\n def needs_sso(self, request, organization):\n # XXX(dcramer): this is very similar to the server-rendered views\n # logic for checking valid SSO\n if not request.access.requires_sso:\n return False\n if not auth.has_completed_sso(request, organization.id):\n return True\n if not request.access.sso_is_valid:\n return True\n return False\n\n def has_object_permission(self, request, view, organization):\n if request.user and request.user.is_authenticated() and request.auth:\n request.access = access.from_request(\n request,\n organization,\n scopes=request.auth.get_scopes(),\n )\n\n elif request.auth:\n if request.auth.organization_id == organization.id:\n request.access = access.from_auth(request.auth)\n else:\n request.access = access.DEFAULT\n\n else:\n request.access = access.from_request(request, organization)\n\n if auth.is_user_signed_request(request):\n # if the user comes from a signed request\n # we let them pass if sso is enabled\n logger.info(\n 'access.signed-sso-passthrough',\n extra={\n 'organization_id': organization.id,\n 'user_id': request.user.id,\n }\n )\n elif request.user.is_authenticated():\n # session auth needs to confirm various permissions\n if self.needs_sso(request, organization):\n\n logger.info(\n 'access.must-sso',\n extra={\n 'organization_id': organization.id,\n 'user_id': request.user.id,\n }\n )\n\n raise SsoRequired(organization)\n\n if self.is_not_2fa_compliant(\n request.user, organization):\n logger.info(\n 'access.not-2fa-compliant',\n extra={\n 'organization_id': organization.id,\n 'user_id': request.user.id,\n }\n )\n raise TwoFactorRequired()\n\n allowed_scopes = set(self.scope_map.get(request.method, []))\n return any(request.access.has_scope(s) for s in allowed_scopes)\n\n\n# These are based on ProjectReleasePermission\n# additional checks to limit actions to releases\n# associated with projects people have access to\nclass OrganizationReleasePermission(OrganizationPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin', 'project:releases'],\n 'POST': ['project:write', 'project:admin', 'project:releases'],\n 'PUT': ['project:write', 'project:admin', 'project:releases'],\n 'DELETE': ['project:admin', 'project:releases'],\n }\n\n\nclass OrganizationIntegrationsPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:read', 'org:write', 'org:admin', 'org:integrations'],\n 'POST': ['org:write', 'org:admin', 'org:integrations'],\n 'PUT': ['org:write', 'org:admin', 'org:integrations'],\n 'DELETE': ['org:admin', 'org:integrations'],\n }\n\n\nclass OrganizationAdminPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:admin'],\n 'POST': ['org:admin'],\n 'PUT': ['org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n\nclass OrganizationAuthProviderPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['org:read'],\n 'POST': ['org:admin'],\n 'PUT': ['org:admin'],\n 'DELETE': ['org:admin'],\n }\n\n\nclass 
OrganizationEndpoint(Endpoint):\n permission_classes = (OrganizationPermission, )\n\n def convert_args(self, request, organization_slug, *args, **kwargs):\n try:\n organization = Organization.objects.get_from_cache(\n slug=organization_slug,\n )\n except Organization.DoesNotExist:\n raise ResourceDoesNotExist\n\n self.check_object_permissions(request, organization)\n\n raven.tags_context({\n 'organization': organization.id,\n })\n request._request.organization = organization\n\n # Track the 'active' organization when the request came from\n # a cookie based agent (react app)\n if request.auth is None and request.user:\n request.session['activeorg'] = organization.slug\n\n kwargs['organization'] = organization\n return (args, kwargs)\n\n\nclass OrganizationReleasesBaseEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationReleasePermission, )\n\n def get_allowed_projects(self, request, organization):\n has_valid_api_key = False\n if isinstance(request.auth, ApiKey):\n if request.auth.organization_id != organization.id:\n return []\n has_valid_api_key = request.auth.has_scope('project:releases') or \\\n request.auth.has_scope('project:write')\n\n if not (has_valid_api_key or request.user.is_authenticated()):\n return []\n\n if has_valid_api_key or is_active_superuser(request) or organization.flags.allow_joinleave:\n allowed_teams = Team.objects.filter(organization=organization).values_list(\n 'id', flat=True\n )\n else:\n allowed_teams = OrganizationMemberTeam.objects.filter(\n organizationmember__user=request.user,\n team__organization_id=organization.id,\n ).values_list(\n 'team_id', flat=True\n )\n\n return Project.objects.filter(\n id__in=ProjectTeam.objects.filter(\n team_id__in=allowed_teams,\n ).values_list('project_id', flat=True)\n )\n\n def has_release_permission(self, request, organization, release):\n return ReleaseProject.objects.filter(\n release=release,\n project__in=self.get_allowed_projects(request, organization),\n ).exists()\n", "path": "src/sentry/api/bases/organization.py"}]}
| 2,198 | 152 |
gh_patches_debug_17785
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1717
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up test noise (that includes EPP and migration scripts)
### Issue description
Right now if you run the test suite locally or see the output from github, there is a lot of added prints and logs that make it hard to troubleshoot where your particular error is coming from. This ticket is to clean up test noise in general, including EPP and migration scripts.
### Acceptance criteria
- [ ] unnecessary prints/logs on tests are removed
### Additional context
_No response_
### Links to other issues
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/epplibwrapper/utility/pool.py`
Content:
```
1 import logging
2 from typing import List
3 import gevent
4 from geventconnpool import ConnectionPool
5 from epplibwrapper.socket import Socket
6 from epplibwrapper.utility.pool_error import PoolError, PoolErrorCodes
7
8 try:
9 from epplib.commands import Hello
10 from epplib.exceptions import TransportError
11 except ImportError:
12 pass
13
14 from gevent.lock import BoundedSemaphore
15 from collections import deque
16
17 logger = logging.getLogger(__name__)
18
19
20 class EPPConnectionPool(ConnectionPool):
21 """A connection pool for EPPLib.
22
23 Args:
24 client (Client): The client
25 login (commands.Login): Login creds
26 options (dict): Options for the ConnectionPool
27 base class
28 """
29
30 def __init__(self, client, login, options: dict):
31 # For storing shared credentials
32 self._client = client
33 self._login = login
34
35 # Keep track of each greenlet
36 self.greenlets: List[gevent.Greenlet] = []
37
38 # Define optional pool settings.
39 # Kept in a dict so that the parent class,
40 # client.py, can maintain seperation/expandability
41 self.size = 1
42 if "size" in options:
43 self.size = options["size"]
44
45 self.exc_classes = tuple((TransportError,))
46 if "exc_classes" in options:
47 self.exc_classes = options["exc_classes"]
48
49 self.keepalive = None
50 if "keepalive" in options:
51 self.keepalive = options["keepalive"]
52
53 # Determines the period in which new
54 # gevent threads are spun up.
55 # This time period is in seconds. So for instance, .1 would be .1 seconds.
56 self.spawn_frequency = 0.1
57 if "spawn_frequency" in options:
58 self.spawn_frequency = options["spawn_frequency"]
59
60 self.conn: deque = deque()
61 self.lock = BoundedSemaphore(self.size)
62
63 self.populate_all_connections()
64
65 def _new_connection(self):
66 socket = self._create_socket(self._client, self._login)
67 try:
68 connection = socket.connect()
69 return connection
70 except Exception as err:
71 message = f"Failed to execute due to a registry error: {err}"
72 logger.error(message, exc_info=True)
73 # We want to raise a pool error rather than a LoginError here
74 # because if this occurs internally, we should handle this
75 # differently than we otherwise would for LoginError.
76 raise PoolError(code=PoolErrorCodes.NEW_CONNECTION_FAILED) from err
77
78 def _keepalive(self, c):
79 """Sends a command to the server to keep the connection alive."""
80 try:
81 # Sends a ping to the registry via EPPLib
82 c.send(Hello())
83 except Exception as err:
84 message = "Failed to keep the connection alive."
85 logger.error(message, exc_info=True)
86 raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err
87
88 def _create_socket(self, client, login) -> Socket:
89 """Creates and returns a socket instance"""
90 socket = Socket(client, login)
91 return socket
92
93 def get_connections(self):
94 """Returns the connection queue"""
95 return self.conn
96
97 def kill_all_connections(self):
98 """Kills all active connections in the pool."""
99 try:
100 if len(self.conn) > 0 or len(self.greenlets) > 0:
101 logger.info("Attempting to kill connections")
102 gevent.killall(self.greenlets)
103
104 self.greenlets.clear()
105 for connection in self.conn:
106 connection.disconnect()
107 self.conn.clear()
108
109 # Clear the semaphore
110 self.lock = BoundedSemaphore(self.size)
111 logger.info("Finished killing connections")
112 else:
113 logger.info("No connections to kill.")
114 except Exception as err:
115 logger.error("Could not kill all connections.")
116 raise PoolError(code=PoolErrorCodes.KILL_ALL_FAILED) from err
117
118 def populate_all_connections(self):
119 """Generates the connection pool.
120 If any connections exist, kill them first.
121 Based off of the __init__ definition for geventconnpool.
122 """
123 if len(self.conn) > 0 or len(self.greenlets) > 0:
124 self.kill_all_connections()
125
126 # Setup the lock
127 for i in range(self.size):
128 self.lock.acquire()
129
130 # Open multiple connections
131 for i in range(self.size):
132 self.greenlets.append(gevent.spawn_later(self.spawn_frequency * i, self._addOne))
133
134 # Open a "keepalive" thread if we want to ping open connections
135 if self.keepalive:
136 self.greenlets.append(gevent.spawn(self._keepalive_periodic))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/epplibwrapper/utility/pool.py b/src/epplibwrapper/utility/pool.py
--- a/src/epplibwrapper/utility/pool.py
+++ b/src/epplibwrapper/utility/pool.py
@@ -85,6 +85,21 @@
logger.error(message, exc_info=True)
raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err
+ def _keepalive_periodic(self):
+ """Overriding _keepalive_periodic from geventconnpool so that PoolErrors
+ are properly handled, as opposed to printing to stdout"""
+ delay = float(self.keepalive) / self.size
+ while 1:
+ try:
+ with self.get() as c:
+ self._keepalive(c)
+ except PoolError as err:
+ logger.error(err.message, exc_info=True)
+ except self.exc_classes:
+ # Nothing to do, the pool will generate a new connection later
+ pass
+ gevent.sleep(delay)
+
def _create_socket(self, client, login) -> Socket:
"""Creates and returns a socket instance"""
socket = Socket(client, login)
|
{"golden_diff": "diff --git a/src/epplibwrapper/utility/pool.py b/src/epplibwrapper/utility/pool.py\n--- a/src/epplibwrapper/utility/pool.py\n+++ b/src/epplibwrapper/utility/pool.py\n@@ -85,6 +85,21 @@\n logger.error(message, exc_info=True)\n raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err\n \n+ def _keepalive_periodic(self):\n+ \"\"\"Overriding _keepalive_periodic from geventconnpool so that PoolErrors\n+ are properly handled, as opposed to printing to stdout\"\"\"\n+ delay = float(self.keepalive) / self.size\n+ while 1:\n+ try:\n+ with self.get() as c:\n+ self._keepalive(c)\n+ except PoolError as err:\n+ logger.error(err.message, exc_info=True)\n+ except self.exc_classes:\n+ # Nothing to do, the pool will generate a new connection later\n+ pass\n+ gevent.sleep(delay)\n+\n def _create_socket(self, client, login) -> Socket:\n \"\"\"Creates and returns a socket instance\"\"\"\n socket = Socket(client, login)\n", "issue": "Clean up test noise (that includes EPP and migration scripts)\n### Issue description\r\n\r\nRight now if you run the test suite locally or see the output from github, there is a lot of added prints and logs that make it hard to troubleshoot where your particular error is coming from. This ticket is clean up test noise in general including EPP and migration scripts. \r\n\r\n\r\n\r\n### Acceptance criteria\r\n\r\n- [ ] unnecessary prints/logs on tests are removed\r\n\r\n### Additional context\r\n\r\n_No response_\r\n\r\n### Links to other issues\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nfrom typing import List\nimport gevent\nfrom geventconnpool import ConnectionPool\nfrom epplibwrapper.socket import Socket\nfrom epplibwrapper.utility.pool_error import PoolError, PoolErrorCodes\n\ntry:\n from epplib.commands import Hello\n from epplib.exceptions import TransportError\nexcept ImportError:\n pass\n\nfrom gevent.lock import BoundedSemaphore\nfrom collections import deque\n\nlogger = logging.getLogger(__name__)\n\n\nclass EPPConnectionPool(ConnectionPool):\n \"\"\"A connection pool for EPPLib.\n\n Args:\n client (Client): The client\n login (commands.Login): Login creds\n options (dict): Options for the ConnectionPool\n base class\n \"\"\"\n\n def __init__(self, client, login, options: dict):\n # For storing shared credentials\n self._client = client\n self._login = login\n\n # Keep track of each greenlet\n self.greenlets: List[gevent.Greenlet] = []\n\n # Define optional pool settings.\n # Kept in a dict so that the parent class,\n # client.py, can maintain seperation/expandability\n self.size = 1\n if \"size\" in options:\n self.size = options[\"size\"]\n\n self.exc_classes = tuple((TransportError,))\n if \"exc_classes\" in options:\n self.exc_classes = options[\"exc_classes\"]\n\n self.keepalive = None\n if \"keepalive\" in options:\n self.keepalive = options[\"keepalive\"]\n\n # Determines the period in which new\n # gevent threads are spun up.\n # This time period is in seconds. 
So for instance, .1 would be .1 seconds.\n self.spawn_frequency = 0.1\n if \"spawn_frequency\" in options:\n self.spawn_frequency = options[\"spawn_frequency\"]\n\n self.conn: deque = deque()\n self.lock = BoundedSemaphore(self.size)\n\n self.populate_all_connections()\n\n def _new_connection(self):\n socket = self._create_socket(self._client, self._login)\n try:\n connection = socket.connect()\n return connection\n except Exception as err:\n message = f\"Failed to execute due to a registry error: {err}\"\n logger.error(message, exc_info=True)\n # We want to raise a pool error rather than a LoginError here\n # because if this occurs internally, we should handle this\n # differently than we otherwise would for LoginError.\n raise PoolError(code=PoolErrorCodes.NEW_CONNECTION_FAILED) from err\n\n def _keepalive(self, c):\n \"\"\"Sends a command to the server to keep the connection alive.\"\"\"\n try:\n # Sends a ping to the registry via EPPLib\n c.send(Hello())\n except Exception as err:\n message = \"Failed to keep the connection alive.\"\n logger.error(message, exc_info=True)\n raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err\n\n def _create_socket(self, client, login) -> Socket:\n \"\"\"Creates and returns a socket instance\"\"\"\n socket = Socket(client, login)\n return socket\n\n def get_connections(self):\n \"\"\"Returns the connection queue\"\"\"\n return self.conn\n\n def kill_all_connections(self):\n \"\"\"Kills all active connections in the pool.\"\"\"\n try:\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n logger.info(\"Attempting to kill connections\")\n gevent.killall(self.greenlets)\n\n self.greenlets.clear()\n for connection in self.conn:\n connection.disconnect()\n self.conn.clear()\n\n # Clear the semaphore\n self.lock = BoundedSemaphore(self.size)\n logger.info(\"Finished killing connections\")\n else:\n logger.info(\"No connections to kill.\")\n except Exception as err:\n logger.error(\"Could not kill all connections.\")\n raise PoolError(code=PoolErrorCodes.KILL_ALL_FAILED) from err\n\n def populate_all_connections(self):\n \"\"\"Generates the connection pool.\n If any connections exist, kill them first.\n Based off of the __init__ definition for geventconnpool.\n \"\"\"\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n self.kill_all_connections()\n\n # Setup the lock\n for i in range(self.size):\n self.lock.acquire()\n\n # Open multiple connections\n for i in range(self.size):\n self.greenlets.append(gevent.spawn_later(self.spawn_frequency * i, self._addOne))\n\n # Open a \"keepalive\" thread if we want to ping open connections\n if self.keepalive:\n self.greenlets.append(gevent.spawn(self._keepalive_periodic))\n", "path": "src/epplibwrapper/utility/pool.py"}], "after_files": [{"content": "import logging\nfrom typing import List\nimport gevent\nfrom geventconnpool import ConnectionPool\nfrom epplibwrapper.socket import Socket\nfrom epplibwrapper.utility.pool_error import PoolError, PoolErrorCodes\n\ntry:\n from epplib.commands import Hello\n from epplib.exceptions import TransportError\nexcept ImportError:\n pass\n\nfrom gevent.lock import BoundedSemaphore\nfrom collections import deque\n\nlogger = logging.getLogger(__name__)\n\n\nclass EPPConnectionPool(ConnectionPool):\n \"\"\"A connection pool for EPPLib.\n\n Args:\n client (Client): The client\n login (commands.Login): Login creds\n options (dict): Options for the ConnectionPool\n base class\n \"\"\"\n\n def __init__(self, client, login, options: dict):\n # For storing shared credentials\n 
self._client = client\n self._login = login\n\n # Keep track of each greenlet\n self.greenlets: List[gevent.Greenlet] = []\n\n # Define optional pool settings.\n # Kept in a dict so that the parent class,\n # client.py, can maintain seperation/expandability\n self.size = 1\n if \"size\" in options:\n self.size = options[\"size\"]\n\n self.exc_classes = tuple((TransportError,))\n if \"exc_classes\" in options:\n self.exc_classes = options[\"exc_classes\"]\n\n self.keepalive = None\n if \"keepalive\" in options:\n self.keepalive = options[\"keepalive\"]\n\n # Determines the period in which new\n # gevent threads are spun up.\n # This time period is in seconds. So for instance, .1 would be .1 seconds.\n self.spawn_frequency = 0.1\n if \"spawn_frequency\" in options:\n self.spawn_frequency = options[\"spawn_frequency\"]\n\n self.conn: deque = deque()\n self.lock = BoundedSemaphore(self.size)\n\n self.populate_all_connections()\n\n def _new_connection(self):\n socket = self._create_socket(self._client, self._login)\n try:\n connection = socket.connect()\n return connection\n except Exception as err:\n message = f\"Failed to execute due to a registry error: {err}\"\n logger.error(message, exc_info=True)\n # We want to raise a pool error rather than a LoginError here\n # because if this occurs internally, we should handle this\n # differently than we otherwise would for LoginError.\n raise PoolError(code=PoolErrorCodes.NEW_CONNECTION_FAILED) from err\n\n def _keepalive(self, c):\n \"\"\"Sends a command to the server to keep the connection alive.\"\"\"\n try:\n # Sends a ping to the registry via EPPLib\n c.send(Hello())\n except Exception as err:\n message = \"Failed to keep the connection alive.\"\n logger.error(message, exc_info=True)\n raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err\n\n def _keepalive_periodic(self):\n \"\"\"Overriding _keepalive_periodic from geventconnpool so that PoolErrors\n are properly handled, as opposed to printing to stdout\"\"\"\n delay = float(self.keepalive) / self.size\n while 1:\n try:\n with self.get() as c:\n self._keepalive(c)\n except PoolError as err:\n logger.error(err.message, exc_info=True)\n except self.exc_classes:\n # Nothing to do, the pool will generate a new connection later\n pass\n gevent.sleep(delay)\n\n def _create_socket(self, client, login) -> Socket:\n \"\"\"Creates and returns a socket instance\"\"\"\n socket = Socket(client, login)\n return socket\n\n def get_connections(self):\n \"\"\"Returns the connection queue\"\"\"\n return self.conn\n\n def kill_all_connections(self):\n \"\"\"Kills all active connections in the pool.\"\"\"\n try:\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n logger.info(\"Attempting to kill connections\")\n gevent.killall(self.greenlets)\n\n self.greenlets.clear()\n for connection in self.conn:\n connection.disconnect()\n self.conn.clear()\n\n # Clear the semaphore\n self.lock = BoundedSemaphore(self.size)\n logger.info(\"Finished killing connections\")\n else:\n logger.info(\"No connections to kill.\")\n except Exception as err:\n logger.error(\"Could not kill all connections.\")\n raise PoolError(code=PoolErrorCodes.KILL_ALL_FAILED) from err\n\n def populate_all_connections(self):\n \"\"\"Generates the connection pool.\n If any connections exist, kill them first.\n Based off of the __init__ definition for geventconnpool.\n \"\"\"\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n self.kill_all_connections()\n\n # Setup the lock\n for i in range(self.size):\n self.lock.acquire()\n\n # Open 
multiple connections\n for i in range(self.size):\n self.greenlets.append(gevent.spawn_later(self.spawn_frequency * i, self._addOne))\n\n # Open a \"keepalive\" thread if we want to ping open connections\n if self.keepalive:\n self.greenlets.append(gevent.spawn(self._keepalive_periodic))\n", "path": "src/epplibwrapper/utility/pool.py"}]}
| 1,681 | 256 |
gh_patches_debug_22309
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-5494
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update docs for `cupy.linalg.eigh` and `cupy.linalg.eigvalsh`
https://docs.cupy.dev/en/stable/reference/generated/cupy.linalg.eigvalsh.html
> Calculates eigenvalues of a symmetric matrix.
https://numpy.org/doc/stable/reference/generated/numpy.linalg.eigvalsh.html
> Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Documentation needs to be updated as we already support Hermitian matrix in https://github.com/cupy/cupy/pull/1518.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/linalg/_eigenvalue.py`
Content:
```
1 import numpy
2
3 import cupy
4 from cupy_backends.cuda.libs import cublas
5 from cupy_backends.cuda.libs import cusolver
6 from cupy.cuda import device
7 from cupy.linalg import _util
8
9
10 def _syevd(a, UPLO, with_eigen_vector):
11 if UPLO not in ('L', 'U'):
12 raise ValueError('UPLO argument must be \'L\' or \'U\'')
13
14 # reject_float16=False for backward compatibility
15 dtype, v_dtype = _util.linalg_common_type(a, reject_float16=False)
16 real_dtype = dtype.char.lower()
17 w_dtype = v_dtype.char.lower()
18
19 # Note that cuSolver assumes fortran array
20 v = a.astype(dtype, order='F', copy=True)
21
22 m, lda = a.shape
23 w = cupy.empty(m, real_dtype)
24 dev_info = cupy.empty((), numpy.int32)
25 handle = device.Device().cusolver_handle
26
27 if with_eigen_vector:
28 jobz = cusolver.CUSOLVER_EIG_MODE_VECTOR
29 else:
30 jobz = cusolver.CUSOLVER_EIG_MODE_NOVECTOR
31
32 if UPLO == 'L':
33 uplo = cublas.CUBLAS_FILL_MODE_LOWER
34 else: # UPLO == 'U'
35 uplo = cublas.CUBLAS_FILL_MODE_UPPER
36
37 if dtype == 'f':
38 buffer_size = cupy.cuda.cusolver.ssyevd_bufferSize
39 syevd = cupy.cuda.cusolver.ssyevd
40 elif dtype == 'd':
41 buffer_size = cupy.cuda.cusolver.dsyevd_bufferSize
42 syevd = cupy.cuda.cusolver.dsyevd
43 elif dtype == 'F':
44 buffer_size = cupy.cuda.cusolver.cheevd_bufferSize
45 syevd = cupy.cuda.cusolver.cheevd
46 elif dtype == 'D':
47 buffer_size = cupy.cuda.cusolver.zheevd_bufferSize
48 syevd = cupy.cuda.cusolver.zheevd
49 else:
50 raise RuntimeError('Only float and double and cuComplex and '
51 + 'cuDoubleComplex are supported')
52
53 work_size = buffer_size(
54 handle, jobz, uplo, m, v.data.ptr, lda, w.data.ptr)
55 work = cupy.empty(work_size, dtype)
56 syevd(
57 handle, jobz, uplo, m, v.data.ptr, lda,
58 w.data.ptr, work.data.ptr, work_size, dev_info.data.ptr)
59 cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
60 syevd, dev_info)
61
62 return w.astype(w_dtype, copy=False), v.astype(v_dtype, copy=False)
63
64
65 # TODO(okuta): Implement eig
66
67
68 def eigh(a, UPLO='L'):
69 """Eigenvalues and eigenvectors of a symmetric matrix.
70
71 This method calculates eigenvalues and eigenvectors of a given
72 symmetric matrix.
73
74 Args:
75 a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch
76 of symmetric 2-D square matrices ``(..., M, M)``.
77 UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which
78 part of ``a`` is used. ``'L'`` uses the lower triangular part of
79 ``a``, and ``'U'`` uses the upper triangular part of ``a``.
80 Returns:
81 tuple of :class:`~cupy.ndarray`:
82 Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and
83 ``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector
84 corresponding to an eigenvalue ``w[i]``. For batch input,
85 ``v[k, :, i]`` is an eigenvector corresponding to an eigenvalue
86 ``w[k, i]`` of ``a[k]``.
87
88 .. warning::
89 This function calls one or more cuSOLVER routine(s) which may yield
90 invalid results if input conditions are not met.
91 To detect these invalid results, you can set the `linalg`
92 configuration to a value that is not `ignore` in
93 :func:`cupyx.errstate` or :func:`cupyx.seterr`.
94
95 .. seealso:: :func:`numpy.linalg.eigh`
96 """
97 if a.ndim < 2:
98 raise ValueError('Array must be at least two-dimensional')
99
100 m, n = a.shape[-2:]
101 if m != n:
102 raise ValueError('Last 2 dimensions of the array must be square')
103
104 if a.ndim > 2:
105 return cupy.cusolver.syevj(a, UPLO, True)
106 else:
107 return _syevd(a, UPLO, True)
108
109
110 # TODO(okuta): Implement eigvals
111
112
113 def eigvalsh(a, UPLO='L'):
114 """Calculates eigenvalues of a symmetric matrix.
115
116 This method calculates eigenvalues a given symmetric matrix.
117 Note that :func:`cupy.linalg.eigh` calculates both eigenvalues and
118 eigenvectors.
119
120 Args:
121 a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch
122 of symmetric 2-D square matrices ``(..., M, M)``.
123 UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which
124 part of ``a`` is used. ``'L'`` uses the lower triangular part of
125 ``a``, and ``'U'`` uses the upper triangular part of ``a``.
126 Returns:
127 cupy.ndarray:
128 Returns eigenvalues as a vector ``w``. For batch input,
129 ``w[k]`` is a vector of eigenvalues of matrix ``a[k]``.
130
131 .. warning::
132 This function calls one or more cuSOLVER routine(s) which may yield
133 invalid results if input conditions are not met.
134 To detect these invalid results, you can set the `linalg`
135 configuration to a value that is not `ignore` in
136 :func:`cupyx.errstate` or :func:`cupyx.seterr`.
137
138 .. seealso:: :func:`numpy.linalg.eigvalsh`
139 """
140 if a.ndim < 2:
141 raise ValueError('Array must be at least two-dimensional')
142
143 _util._assert_nd_squareness(a)
144
145 if a.ndim > 2:
146 return cupy.cusolver.syevj(a, UPLO, False)
147 else:
148 return _syevd(a, UPLO, False)[0]
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupy/linalg/_eigenvalue.py b/cupy/linalg/_eigenvalue.py
--- a/cupy/linalg/_eigenvalue.py
+++ b/cupy/linalg/_eigenvalue.py
@@ -66,10 +66,13 @@
def eigh(a, UPLO='L'):
- """Eigenvalues and eigenvectors of a symmetric matrix.
+ """
+ Return the eigenvalues and eigenvectors of a complex Hermitian
+ (conjugate symmetric) or a real symmetric matrix.
- This method calculates eigenvalues and eigenvectors of a given
- symmetric matrix.
+ Returns two objects, a 1-D array containing the eigenvalues of `a`, and
+ a 2-D square array or matrix (depending on the input type) of the
+ corresponding eigenvectors (in columns).
Args:
a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch
@@ -111,11 +114,10 @@
def eigvalsh(a, UPLO='L'):
- """Calculates eigenvalues of a symmetric matrix.
+ """
+ Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
- This method calculates eigenvalues a given symmetric matrix.
- Note that :func:`cupy.linalg.eigh` calculates both eigenvalues and
- eigenvectors.
+ Main difference from eigh: the eigenvectors are not computed.
Args:
a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch
|
{"golden_diff": "diff --git a/cupy/linalg/_eigenvalue.py b/cupy/linalg/_eigenvalue.py\n--- a/cupy/linalg/_eigenvalue.py\n+++ b/cupy/linalg/_eigenvalue.py\n@@ -66,10 +66,13 @@\n \n \n def eigh(a, UPLO='L'):\n- \"\"\"Eigenvalues and eigenvectors of a symmetric matrix.\n+ \"\"\"\n+ Return the eigenvalues and eigenvectors of a complex Hermitian\n+ (conjugate symmetric) or a real symmetric matrix.\n \n- This method calculates eigenvalues and eigenvectors of a given\n- symmetric matrix.\n+ Returns two objects, a 1-D array containing the eigenvalues of `a`, and\n+ a 2-D square array or matrix (depending on the input type) of the\n+ corresponding eigenvectors (in columns).\n \n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n@@ -111,11 +114,10 @@\n \n \n def eigvalsh(a, UPLO='L'):\n- \"\"\"Calculates eigenvalues of a symmetric matrix.\n+ \"\"\"\n+ Compute the eigenvalues of a complex Hermitian or real symmetric matrix.\n \n- This method calculates eigenvalues a given symmetric matrix.\n- Note that :func:`cupy.linalg.eigh` calculates both eigenvalues and\n- eigenvectors.\n+ Main difference from eigh: the eigenvectors are not computed.\n \n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n", "issue": "Update docs for `cupy.linalg.eigh` and `cupy.linalg.eigvalsh`\nhttps://docs.cupy.dev/en/stable/reference/generated/cupy.linalg.eigvalsh.html\r\n> Calculates eigenvalues of a symmetric matrix.\r\n\r\nhttps://numpy.org/doc/stable/reference/generated/numpy.linalg.eigvalsh.html\r\n> Compute the eigenvalues of a complex Hermitian or real symmetric matrix.\r\n\r\nDocumentation needs to be updated as we already support Hermitian matrix in https://github.com/cupy/cupy/pull/1518.\n", "before_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy_backends.cuda.libs import cublas\nfrom cupy_backends.cuda.libs import cusolver\nfrom cupy.cuda import device\nfrom cupy.linalg import _util\n\n\ndef _syevd(a, UPLO, with_eigen_vector):\n if UPLO not in ('L', 'U'):\n raise ValueError('UPLO argument must be \\'L\\' or \\'U\\'')\n\n # reject_float16=False for backward compatibility\n dtype, v_dtype = _util.linalg_common_type(a, reject_float16=False)\n real_dtype = dtype.char.lower()\n w_dtype = v_dtype.char.lower()\n\n # Note that cuSolver assumes fortran array\n v = a.astype(dtype, order='F', copy=True)\n\n m, lda = a.shape\n w = cupy.empty(m, real_dtype)\n dev_info = cupy.empty((), numpy.int32)\n handle = device.Device().cusolver_handle\n\n if with_eigen_vector:\n jobz = cusolver.CUSOLVER_EIG_MODE_VECTOR\n else:\n jobz = cusolver.CUSOLVER_EIG_MODE_NOVECTOR\n\n if UPLO == 'L':\n uplo = cublas.CUBLAS_FILL_MODE_LOWER\n else: # UPLO == 'U'\n uplo = cublas.CUBLAS_FILL_MODE_UPPER\n\n if dtype == 'f':\n buffer_size = cupy.cuda.cusolver.ssyevd_bufferSize\n syevd = cupy.cuda.cusolver.ssyevd\n elif dtype == 'd':\n buffer_size = cupy.cuda.cusolver.dsyevd_bufferSize\n syevd = cupy.cuda.cusolver.dsyevd\n elif dtype == 'F':\n buffer_size = cupy.cuda.cusolver.cheevd_bufferSize\n syevd = cupy.cuda.cusolver.cheevd\n elif dtype == 'D':\n buffer_size = cupy.cuda.cusolver.zheevd_bufferSize\n syevd = cupy.cuda.cusolver.zheevd\n else:\n raise RuntimeError('Only float and double and cuComplex and '\n + 'cuDoubleComplex are supported')\n\n work_size = buffer_size(\n handle, jobz, uplo, m, v.data.ptr, lda, w.data.ptr)\n work = cupy.empty(work_size, dtype)\n syevd(\n handle, jobz, uplo, m, v.data.ptr, lda,\n w.data.ptr, work.data.ptr, work_size, dev_info.data.ptr)\n 
cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n syevd, dev_info)\n\n return w.astype(w_dtype, copy=False), v.astype(v_dtype, copy=False)\n\n\n# TODO(okuta): Implement eig\n\n\ndef eigh(a, UPLO='L'):\n \"\"\"Eigenvalues and eigenvectors of a symmetric matrix.\n\n This method calculates eigenvalues and eigenvectors of a given\n symmetric matrix.\n\n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n of symmetric 2-D square matrices ``(..., M, M)``.\n UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which\n part of ``a`` is used. ``'L'`` uses the lower triangular part of\n ``a``, and ``'U'`` uses the upper triangular part of ``a``.\n Returns:\n tuple of :class:`~cupy.ndarray`:\n Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and\n ``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector\n corresponding to an eigenvalue ``w[i]``. For batch input,\n ``v[k, :, i]`` is an eigenvector corresponding to an eigenvalue\n ``w[k, i]`` of ``a[k]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. seealso:: :func:`numpy.linalg.eigh`\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Array must be at least two-dimensional')\n\n m, n = a.shape[-2:]\n if m != n:\n raise ValueError('Last 2 dimensions of the array must be square')\n\n if a.ndim > 2:\n return cupy.cusolver.syevj(a, UPLO, True)\n else:\n return _syevd(a, UPLO, True)\n\n\n# TODO(okuta): Implement eigvals\n\n\ndef eigvalsh(a, UPLO='L'):\n \"\"\"Calculates eigenvalues of a symmetric matrix.\n\n This method calculates eigenvalues a given symmetric matrix.\n Note that :func:`cupy.linalg.eigh` calculates both eigenvalues and\n eigenvectors.\n\n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n of symmetric 2-D square matrices ``(..., M, M)``.\n UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which\n part of ``a`` is used. ``'L'`` uses the lower triangular part of\n ``a``, and ``'U'`` uses the upper triangular part of ``a``.\n Returns:\n cupy.ndarray:\n Returns eigenvalues as a vector ``w``. For batch input,\n ``w[k]`` is a vector of eigenvalues of matrix ``a[k]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. 
seealso:: :func:`numpy.linalg.eigvalsh`\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Array must be at least two-dimensional')\n\n _util._assert_nd_squareness(a)\n\n if a.ndim > 2:\n return cupy.cusolver.syevj(a, UPLO, False)\n else:\n return _syevd(a, UPLO, False)[0]\n", "path": "cupy/linalg/_eigenvalue.py"}], "after_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy_backends.cuda.libs import cublas\nfrom cupy_backends.cuda.libs import cusolver\nfrom cupy.cuda import device\nfrom cupy.linalg import _util\n\n\ndef _syevd(a, UPLO, with_eigen_vector):\n if UPLO not in ('L', 'U'):\n raise ValueError('UPLO argument must be \\'L\\' or \\'U\\'')\n\n # reject_float16=False for backward compatibility\n dtype, v_dtype = _util.linalg_common_type(a, reject_float16=False)\n real_dtype = dtype.char.lower()\n w_dtype = v_dtype.char.lower()\n\n # Note that cuSolver assumes fortran array\n v = a.astype(dtype, order='F', copy=True)\n\n m, lda = a.shape\n w = cupy.empty(m, real_dtype)\n dev_info = cupy.empty((), numpy.int32)\n handle = device.Device().cusolver_handle\n\n if with_eigen_vector:\n jobz = cusolver.CUSOLVER_EIG_MODE_VECTOR\n else:\n jobz = cusolver.CUSOLVER_EIG_MODE_NOVECTOR\n\n if UPLO == 'L':\n uplo = cublas.CUBLAS_FILL_MODE_LOWER\n else: # UPLO == 'U'\n uplo = cublas.CUBLAS_FILL_MODE_UPPER\n\n if dtype == 'f':\n buffer_size = cupy.cuda.cusolver.ssyevd_bufferSize\n syevd = cupy.cuda.cusolver.ssyevd\n elif dtype == 'd':\n buffer_size = cupy.cuda.cusolver.dsyevd_bufferSize\n syevd = cupy.cuda.cusolver.dsyevd\n elif dtype == 'F':\n buffer_size = cupy.cuda.cusolver.cheevd_bufferSize\n syevd = cupy.cuda.cusolver.cheevd\n elif dtype == 'D':\n buffer_size = cupy.cuda.cusolver.zheevd_bufferSize\n syevd = cupy.cuda.cusolver.zheevd\n else:\n raise RuntimeError('Only float and double and cuComplex and '\n + 'cuDoubleComplex are supported')\n\n work_size = buffer_size(\n handle, jobz, uplo, m, v.data.ptr, lda, w.data.ptr)\n work = cupy.empty(work_size, dtype)\n syevd(\n handle, jobz, uplo, m, v.data.ptr, lda,\n w.data.ptr, work.data.ptr, work_size, dev_info.data.ptr)\n cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n syevd, dev_info)\n\n return w.astype(w_dtype, copy=False), v.astype(v_dtype, copy=False)\n\n\n# TODO(okuta): Implement eig\n\n\ndef eigh(a, UPLO='L'):\n \"\"\"\n Return the eigenvalues and eigenvectors of a complex Hermitian\n (conjugate symmetric) or a real symmetric matrix.\n\n Returns two objects, a 1-D array containing the eigenvalues of `a`, and\n a 2-D square array or matrix (depending on the input type) of the\n corresponding eigenvectors (in columns).\n\n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n of symmetric 2-D square matrices ``(..., M, M)``.\n UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which\n part of ``a`` is used. ``'L'`` uses the lower triangular part of\n ``a``, and ``'U'`` uses the upper triangular part of ``a``.\n Returns:\n tuple of :class:`~cupy.ndarray`:\n Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and\n ``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector\n corresponding to an eigenvalue ``w[i]``. For batch input,\n ``v[k, :, i]`` is an eigenvector corresponding to an eigenvalue\n ``w[k, i]`` of ``a[k]``.\n\n .. 
warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. seealso:: :func:`numpy.linalg.eigh`\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Array must be at least two-dimensional')\n\n m, n = a.shape[-2:]\n if m != n:\n raise ValueError('Last 2 dimensions of the array must be square')\n\n if a.ndim > 2:\n return cupy.cusolver.syevj(a, UPLO, True)\n else:\n return _syevd(a, UPLO, True)\n\n\n# TODO(okuta): Implement eigvals\n\n\ndef eigvalsh(a, UPLO='L'):\n \"\"\"\n Compute the eigenvalues of a complex Hermitian or real symmetric matrix.\n\n Main difference from eigh: the eigenvectors are not computed.\n\n Args:\n a (cupy.ndarray): A symmetric 2-D square matrix ``(M, M)`` or a batch\n of symmetric 2-D square matrices ``(..., M, M)``.\n UPLO (str): Select from ``'L'`` or ``'U'``. It specifies which\n part of ``a`` is used. ``'L'`` uses the lower triangular part of\n ``a``, and ``'U'`` uses the upper triangular part of ``a``.\n Returns:\n cupy.ndarray:\n Returns eigenvalues as a vector ``w``. For batch input,\n ``w[k]`` is a vector of eigenvalues of matrix ``a[k]``.\n\n .. warning::\n This function calls one or more cuSOLVER routine(s) which may yield\n invalid results if input conditions are not met.\n To detect these invalid results, you can set the `linalg`\n configuration to a value that is not `ignore` in\n :func:`cupyx.errstate` or :func:`cupyx.seterr`.\n\n .. seealso:: :func:`numpy.linalg.eigvalsh`\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Array must be at least two-dimensional')\n\n _util._assert_nd_squareness(a)\n\n if a.ndim > 2:\n return cupy.cusolver.syevj(a, UPLO, False)\n else:\n return _syevd(a, UPLO, False)[0]\n", "path": "cupy/linalg/_eigenvalue.py"}]}
| 2,175 | 363 |
gh_patches_debug_10114
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-982
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mockserver server.py does not work in Python 3
```mockserver_1 | Traceback (most recent call last):
mockserver_1 | File "../server.py", line 5, in <module>
mockserver_1 | from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
mockserver_1 | ModuleNotFoundError: No module named 'SimpleHTTPServer'
```
Looks like some modules have been reorganized in Python 3. Hopefully this is just a matter of updating the imports.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mockserver/server.py`
Content:
```
1 #! /usr/bin/env python
2
3 # Usage: python __file__.py <port>
4
5 from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
6
7 class CORSRequestHandler(SimpleHTTPRequestHandler):
8 def do_OPTIONS(self):
9 self.send_response(200, 'OK')
10 self.end_headers()
11
12 def end_headers(self):
13 self.send_header('Access-Control-Allow-Origin', '*')
14 self.send_header('Access-Control-Allow-Headers', 'x-request-timestamp, x-signature, electricitymap-token')
15 SimpleHTTPRequestHandler.end_headers(self)
16
17 if __name__ == '__main__':
18 BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mockserver/server.py b/mockserver/server.py
--- a/mockserver/server.py
+++ b/mockserver/server.py
@@ -2,7 +2,7 @@
# Usage: python __file__.py <port>
-from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
+from http.server import SimpleHTTPRequestHandler, HTTPServer, test
class CORSRequestHandler(SimpleHTTPRequestHandler):
def do_OPTIONS(self):
@@ -15,4 +15,4 @@
SimpleHTTPRequestHandler.end_headers(self)
if __name__ == '__main__':
- BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)
+ test(CORSRequestHandler, HTTPServer)
|
{"golden_diff": "diff --git a/mockserver/server.py b/mockserver/server.py\n--- a/mockserver/server.py\n+++ b/mockserver/server.py\n@@ -2,7 +2,7 @@\n \n # Usage: python __file__.py <port>\n \n-from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\n+from http.server import SimpleHTTPRequestHandler, HTTPServer, test\n \n class CORSRequestHandler(SimpleHTTPRequestHandler):\n def do_OPTIONS(self):\n@@ -15,4 +15,4 @@\n SimpleHTTPRequestHandler.end_headers(self)\n \n if __name__ == '__main__':\n- BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)\n+ test(CORSRequestHandler, HTTPServer)\n", "issue": "Mockserver server.py does not work in Python 3\n```mockserver_1 | Traceback (most recent call last):\r\nmockserver_1 | File \"../server.py\", line 5, in <module>\r\nmockserver_1 | from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\r\nmockserver_1 | ModuleNotFoundError: No module named 'SimpleHTTPServer'\r\n```\r\nLooks like some modules have been reorganized in Python 3. Hopefully this is just a matter of updating the imports.\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# Usage: python __file__.py <port>\n\nfrom SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\n\nclass CORSRequestHandler(SimpleHTTPRequestHandler):\n def do_OPTIONS(self):\n self.send_response(200, 'OK')\n self.end_headers()\n\n def end_headers(self):\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers', 'x-request-timestamp, x-signature, electricitymap-token')\n SimpleHTTPRequestHandler.end_headers(self)\n\nif __name__ == '__main__':\n BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)\n", "path": "mockserver/server.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\n# Usage: python __file__.py <port>\n\nfrom http.server import SimpleHTTPRequestHandler, HTTPServer, test\n\nclass CORSRequestHandler(SimpleHTTPRequestHandler):\n def do_OPTIONS(self):\n self.send_response(200, 'OK')\n self.end_headers()\n\n def end_headers(self):\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers', 'x-request-timestamp, x-signature, electricitymap-token')\n SimpleHTTPRequestHandler.end_headers(self)\n\nif __name__ == '__main__':\n test(CORSRequestHandler, HTTPServer)\n", "path": "mockserver/server.py"}]}
| 541 | 150 |
gh_patches_debug_22802
|
rasdani/github-patches
|
git_diff
|
celery__celery-7785
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery Import Error
<!--
Please use one of our issue templates.
We reserve the right to close bug reports or feature requests who don't use our templates.
-->
Not able to import Celery module when creating simple app.
from celery import Celery
ImportError: cannot import name 'Celery' from 'celery'
Additional info:
This issue doesn't occur when we downgrade importlib-metadata to 4.12.0
Env details

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/utils/imports.py`
Content:
```
1 """Utilities related to importing modules and symbols by name."""
2 import os
3 import sys
4 import warnings
5 from contextlib import contextmanager
6 from importlib import import_module, reload
7
8 try:
9 from importlib.metadata import entry_points
10 except ImportError:
11 from importlib_metadata import entry_points
12
13 from kombu.utils.imports import symbol_by_name
14
15 #: Billiard sets this when execv is enabled.
16 #: We use it to find out the name of the original ``__main__``
17 #: module, so that we can properly rewrite the name of the
18 #: task to be that of ``App.main``.
19 MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE')
20
21 __all__ = (
22 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name',
23 'cwd_in_path', 'find_module', 'import_from_cwd',
24 'reload_from_cwd', 'module_file', 'gen_task_name',
25 )
26
27
28 class NotAPackage(Exception):
29 """Raised when importing a package, but it's not a package."""
30
31
32 def qualname(obj):
33 """Return object name."""
34 if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
35 obj = obj.__class__
36 q = getattr(obj, '__qualname__', None)
37 if '.' not in q:
38 q = '.'.join((obj.__module__, q))
39 return q
40
41
42 def instantiate(name, *args, **kwargs):
43 """Instantiate class by name.
44
45 See Also:
46 :func:`symbol_by_name`.
47 """
48 return symbol_by_name(name)(*args, **kwargs)
49
50
51 @contextmanager
52 def cwd_in_path():
53 """Context adding the current working directory to sys.path."""
54 cwd = os.getcwd()
55 if cwd in sys.path:
56 yield
57 else:
58 sys.path.insert(0, cwd)
59 try:
60 yield cwd
61 finally:
62 try:
63 sys.path.remove(cwd)
64 except ValueError: # pragma: no cover
65 pass
66
67
68 def find_module(module, path=None, imp=None):
69 """Version of :func:`imp.find_module` supporting dots."""
70 if imp is None:
71 imp = import_module
72 with cwd_in_path():
73 try:
74 return imp(module)
75 except ImportError:
76 # Raise a more specific error if the problem is that one of the
77 # dot-separated segments of the module name is not a package.
78 if '.' in module:
79 parts = module.split('.')
80 for i, part in enumerate(parts[:-1]):
81 package = '.'.join(parts[:i + 1])
82 try:
83 mpart = imp(package)
84 except ImportError:
85 # Break out and re-raise the original ImportError
86 # instead.
87 break
88 try:
89 mpart.__path__
90 except AttributeError:
91 raise NotAPackage(package)
92 raise
93
94
95 def import_from_cwd(module, imp=None, package=None):
96 """Import module, temporarily including modules in the current directory.
97
98 Modules located in the current directory has
99 precedence over modules located in `sys.path`.
100 """
101 if imp is None:
102 imp = import_module
103 with cwd_in_path():
104 return imp(module, package=package)
105
106
107 def reload_from_cwd(module, reloader=None):
108 """Reload module (ensuring that CWD is in sys.path)."""
109 if reloader is None:
110 reloader = reload
111 with cwd_in_path():
112 return reloader(module)
113
114
115 def module_file(module):
116 """Return the correct original file name of a module."""
117 name = module.__file__
118 return name[:-1] if name.endswith('.pyc') else name
119
120
121 def gen_task_name(app, name, module_name):
122 """Generate task name from name/module pair."""
123 module_name = module_name or '__main__'
124 try:
125 module = sys.modules[module_name]
126 except KeyError:
127 # Fix for manage.py shell_plus (Issue #366)
128 module = None
129
130 if module is not None:
131 module_name = module.__name__
132 # - If the task module is used as the __main__ script
133 # - we need to rewrite the module part of the task name
134 # - to match App.main.
135 if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:
136 # - see comment about :envvar:`MP_MAIN_FILE` above.
137 module_name = '__main__'
138 if module_name == '__main__' and app.main:
139 return '.'.join([app.main, name])
140 return '.'.join(p for p in (module_name, name) if p)
141
142
143 def load_extension_class_names(namespace):
144 for ep in entry_points().get(namespace, []):
145 yield ep.name, ep.value
146
147
148 def load_extension_classes(namespace):
149 for name, class_name in load_extension_class_names(namespace):
150 try:
151 cls = symbol_by_name(class_name)
152 except (ImportError, SyntaxError) as exc:
153 warnings.warn(
154 f'Cannot load {namespace} extension {class_name!r}: {exc!r}')
155 else:
156 yield name, cls
157
```
Path: `celery/bin/celery.py`
Content:
```
1 """Celery Command Line Interface."""
2 import os
3 import pathlib
4 import traceback
5
6 try:
7 from importlib.metadata import entry_points
8 except ImportError:
9 from importlib_metadata import entry_points
10
11 import click
12 import click.exceptions
13 from click.types import ParamType
14 from click_didyoumean import DYMGroup
15 from click_plugins import with_plugins
16
17 from celery import VERSION_BANNER
18 from celery.app.utils import find_app
19 from celery.bin.amqp import amqp
20 from celery.bin.base import CeleryCommand, CeleryOption, CLIContext
21 from celery.bin.beat import beat
22 from celery.bin.call import call
23 from celery.bin.control import control, inspect, status
24 from celery.bin.events import events
25 from celery.bin.graph import graph
26 from celery.bin.list import list_
27 from celery.bin.logtool import logtool
28 from celery.bin.migrate import migrate
29 from celery.bin.multi import multi
30 from celery.bin.purge import purge
31 from celery.bin.result import result
32 from celery.bin.shell import shell
33 from celery.bin.upgrade import upgrade
34 from celery.bin.worker import worker
35
36 UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style("""
37 Unable to load celery application.
38 The module {0} was not found.""", fg='red')
39
40 UNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style("""
41 Unable to load celery application.
42 While trying to load the module {0} the following error occurred:
43 {1}""", fg='red')
44
45 UNABLE_TO_LOAD_APP_APP_MISSING = click.style("""
46 Unable to load celery application.
47 {0}""")
48
49
50 class App(ParamType):
51 """Application option."""
52
53 name = "application"
54
55 def convert(self, value, param, ctx):
56 try:
57 return find_app(value)
58 except ModuleNotFoundError as e:
59 if e.name != value:
60 exc = traceback.format_exc()
61 self.fail(
62 UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
63 )
64 self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))
65 except AttributeError as e:
66 attribute_name = e.args[0].capitalize()
67 self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))
68 except Exception:
69 exc = traceback.format_exc()
70 self.fail(
71 UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
72 )
73
74
75 APP = App()
76
77
78 @with_plugins(entry_points().get('celery.commands', []))
79 @click.group(cls=DYMGroup, invoke_without_command=True)
80 @click.option('-A',
81 '--app',
82 envvar='APP',
83 cls=CeleryOption,
84 type=APP,
85 help_group="Global Options")
86 @click.option('-b',
87 '--broker',
88 envvar='BROKER_URL',
89 cls=CeleryOption,
90 help_group="Global Options")
91 @click.option('--result-backend',
92 envvar='RESULT_BACKEND',
93 cls=CeleryOption,
94 help_group="Global Options")
95 @click.option('--loader',
96 envvar='LOADER',
97 cls=CeleryOption,
98 help_group="Global Options")
99 @click.option('--config',
100 envvar='CONFIG_MODULE',
101 cls=CeleryOption,
102 help_group="Global Options")
103 @click.option('--workdir',
104 cls=CeleryOption,
105 type=pathlib.Path,
106 callback=lambda _, __, wd: os.chdir(wd) if wd else None,
107 is_eager=True,
108 help_group="Global Options")
109 @click.option('-C',
110 '--no-color',
111 envvar='NO_COLOR',
112 is_flag=True,
113 cls=CeleryOption,
114 help_group="Global Options")
115 @click.option('-q',
116 '--quiet',
117 is_flag=True,
118 cls=CeleryOption,
119 help_group="Global Options")
120 @click.option('--version',
121 cls=CeleryOption,
122 is_flag=True,
123 help_group="Global Options")
124 @click.pass_context
125 def celery(ctx, app, broker, result_backend, loader, config, workdir,
126 no_color, quiet, version):
127 """Celery command entrypoint."""
128 if version:
129 click.echo(VERSION_BANNER)
130 ctx.exit()
131 elif ctx.invoked_subcommand is None:
132 click.echo(ctx.get_help())
133 ctx.exit()
134
135 if loader:
136 # Default app takes loader from this env (Issue #1066).
137 os.environ['CELERY_LOADER'] = loader
138 if broker:
139 os.environ['CELERY_BROKER_URL'] = broker
140 if result_backend:
141 os.environ['CELERY_RESULT_BACKEND'] = result_backend
142 if config:
143 os.environ['CELERY_CONFIG_MODULE'] = config
144 ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
145 quiet=quiet)
146
147 # User options
148 worker.params.extend(ctx.obj.app.user_options.get('worker', []))
149 beat.params.extend(ctx.obj.app.user_options.get('beat', []))
150 events.params.extend(ctx.obj.app.user_options.get('events', []))
151
152 for command in celery.commands.values():
153 command.params.extend(ctx.obj.app.user_options.get('preload', []))
154
155
156 @celery.command(cls=CeleryCommand)
157 @click.pass_context
158 def report(ctx):
159 """Shows information useful to include in bug-reports."""
160 app = ctx.obj.app
161 app.loader.import_default_modules()
162 ctx.obj.echo(app.bugreport())
163
164
165 celery.add_command(purge)
166 celery.add_command(call)
167 celery.add_command(beat)
168 celery.add_command(list_)
169 celery.add_command(result)
170 celery.add_command(migrate)
171 celery.add_command(status)
172 celery.add_command(worker)
173 celery.add_command(events)
174 celery.add_command(inspect)
175 celery.add_command(control)
176 celery.add_command(graph)
177 celery.add_command(upgrade)
178 celery.add_command(logtool)
179 celery.add_command(amqp)
180 celery.add_command(shell)
181 celery.add_command(multi)
182
183 # Monkey-patch click to display a custom error
184 # when -A or --app are used as sub-command options instead of as options
185 # of the global command.
186
187 previous_show_implementation = click.exceptions.NoSuchOption.show
188
189 WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command:
190 celery {info_name} {option_name} celeryapp <...>
191
192 The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:
193 celery {option_name} celeryapp {info_name} <...>"""
194
195
196 def _show(self, file=None):
197 if self.option_name in ('-A', '--app'):
198 self.ctx.obj.error(
199 WRONG_APP_OPTION_USAGE_MESSAGE.format(
200 option_name=self.option_name,
201 info_name=self.ctx.info_name),
202 fg='red'
203 )
204 previous_show_implementation(self, file=file)
205
206
207 click.exceptions.NoSuchOption.show = _show
208
209
210 def main() -> int:
211 """Start celery umbrella command.
212
213 This function is the main entrypoint for the CLI.
214
215 :return: The exit code of the CLI.
216 """
217 return celery(auto_envvar_prefix="CELERY")
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,6 +1,7 @@
"""Celery Command Line Interface."""
import os
import pathlib
+import sys
import traceback
try:
@@ -75,7 +76,16 @@
APP = App()
-@with_plugins(entry_points().get('celery.commands', []))
+if sys.version_info >= (3, 10):
+ _PLUGINS = entry_points(group='celery.commands')
+else:
+ try:
+ _PLUGINS = entry_points().get('celery.commands', [])
+ except AttributeError:
+ _PLUGINS = entry_points().select(group='celery.commands')
+
+
+@with_plugins(_PLUGINS)
@click.group(cls=DYMGroup, invoke_without_command=True)
@click.option('-A',
'--app',
diff --git a/celery/utils/imports.py b/celery/utils/imports.py
--- a/celery/utils/imports.py
+++ b/celery/utils/imports.py
@@ -141,7 +141,14 @@
def load_extension_class_names(namespace):
- for ep in entry_points().get(namespace, []):
+ if sys.version_info >= (3, 10):
+ _entry_points = entry_points(group=namespace)
+ else:
+ try:
+ _entry_points = entry_points().get(namespace, [])
+ except AttributeError:
+ _entry_points = entry_points().select(group=namespace)
+ for ep in _entry_points:
yield ep.name, ep.value
|
{"golden_diff": "diff --git a/celery/bin/celery.py b/celery/bin/celery.py\n--- a/celery/bin/celery.py\n+++ b/celery/bin/celery.py\n@@ -1,6 +1,7 @@\n \"\"\"Celery Command Line Interface.\"\"\"\n import os\n import pathlib\n+import sys\n import traceback\n \n try:\n@@ -75,7 +76,16 @@\n APP = App()\n \n \n-@with_plugins(entry_points().get('celery.commands', []))\n+if sys.version_info >= (3, 10):\n+ _PLUGINS = entry_points(group='celery.commands')\n+else:\n+ try:\n+ _PLUGINS = entry_points().get('celery.commands', [])\n+ except AttributeError:\n+ _PLUGINS = entry_points().select(group='celery.commands')\n+\n+\n+@with_plugins(_PLUGINS)\n @click.group(cls=DYMGroup, invoke_without_command=True)\n @click.option('-A',\n '--app',\ndiff --git a/celery/utils/imports.py b/celery/utils/imports.py\n--- a/celery/utils/imports.py\n+++ b/celery/utils/imports.py\n@@ -141,7 +141,14 @@\n \n \n def load_extension_class_names(namespace):\n- for ep in entry_points().get(namespace, []):\n+ if sys.version_info >= (3, 10):\n+ _entry_points = entry_points(group=namespace)\n+ else:\n+ try:\n+ _entry_points = entry_points().get(namespace, [])\n+ except AttributeError:\n+ _entry_points = entry_points().select(group=namespace)\n+ for ep in _entry_points:\n yield ep.name, ep.value\n", "issue": "Celery Import Error\n<!--\r\nPlease use one of our issue templates.\r\nWe reserve the right to close bug reports or feature requests who don't use our templates.\r\n-->\r\nNot able to import Celery module when creating simple app.\r\n\r\nfrom celery import Celery\r\nImportError: cannot import name 'Celery' from 'celery'\r\n\r\nAdditonal info:\r\nThis issue doesn't occur when we downgrade importlib-metadata to 4.12.0\r\nEnv details\r\n\r\n\n", "before_files": [{"content": "\"\"\"Utilities related to importing modules and symbols by name.\"\"\"\nimport os\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module, reload\n\ntry:\n from importlib.metadata import entry_points\nexcept ImportError:\n from importlib_metadata import entry_points\n\nfrom kombu.utils.imports import symbol_by_name\n\n#: Billiard sets this when execv is enabled.\n#: We use it to find out the name of the original ``__main__``\n#: module, so that we can properly rewrite the name of the\n#: task to be that of ``App.main``.\nMP_MAIN_FILE = os.environ.get('MP_MAIN_FILE')\n\n__all__ = (\n 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name',\n 'cwd_in_path', 'find_module', 'import_from_cwd',\n 'reload_from_cwd', 'module_file', 'gen_task_name',\n)\n\n\nclass NotAPackage(Exception):\n \"\"\"Raised when importing a package, but it's not a package.\"\"\"\n\n\ndef qualname(obj):\n \"\"\"Return object name.\"\"\"\n if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):\n obj = obj.__class__\n q = getattr(obj, '__qualname__', None)\n if '.' 
not in q:\n q = '.'.join((obj.__module__, q))\n return q\n\n\ndef instantiate(name, *args, **kwargs):\n \"\"\"Instantiate class by name.\n\n See Also:\n :func:`symbol_by_name`.\n \"\"\"\n return symbol_by_name(name)(*args, **kwargs)\n\n\n@contextmanager\ndef cwd_in_path():\n \"\"\"Context adding the current working directory to sys.path.\"\"\"\n cwd = os.getcwd()\n if cwd in sys.path:\n yield\n else:\n sys.path.insert(0, cwd)\n try:\n yield cwd\n finally:\n try:\n sys.path.remove(cwd)\n except ValueError: # pragma: no cover\n pass\n\n\ndef find_module(module, path=None, imp=None):\n \"\"\"Version of :func:`imp.find_module` supporting dots.\"\"\"\n if imp is None:\n imp = import_module\n with cwd_in_path():\n try:\n return imp(module)\n except ImportError:\n # Raise a more specific error if the problem is that one of the\n # dot-separated segments of the module name is not a package.\n if '.' in module:\n parts = module.split('.')\n for i, part in enumerate(parts[:-1]):\n package = '.'.join(parts[:i + 1])\n try:\n mpart = imp(package)\n except ImportError:\n # Break out and re-raise the original ImportError\n # instead.\n break\n try:\n mpart.__path__\n except AttributeError:\n raise NotAPackage(package)\n raise\n\n\ndef import_from_cwd(module, imp=None, package=None):\n \"\"\"Import module, temporarily including modules in the current directory.\n\n Modules located in the current directory has\n precedence over modules located in `sys.path`.\n \"\"\"\n if imp is None:\n imp = import_module\n with cwd_in_path():\n return imp(module, package=package)\n\n\ndef reload_from_cwd(module, reloader=None):\n \"\"\"Reload module (ensuring that CWD is in sys.path).\"\"\"\n if reloader is None:\n reloader = reload\n with cwd_in_path():\n return reloader(module)\n\n\ndef module_file(module):\n \"\"\"Return the correct original file name of a module.\"\"\"\n name = module.__file__\n return name[:-1] if name.endswith('.pyc') else name\n\n\ndef gen_task_name(app, name, module_name):\n \"\"\"Generate task name from name/module pair.\"\"\"\n module_name = module_name or '__main__'\n try:\n module = sys.modules[module_name]\n except KeyError:\n # Fix for manage.py shell_plus (Issue #366)\n module = None\n\n if module is not None:\n module_name = module.__name__\n # - If the task module is used as the __main__ script\n # - we need to rewrite the module part of the task name\n # - to match App.main.\n if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:\n # - see comment about :envvar:`MP_MAIN_FILE` above.\n module_name = '__main__'\n if module_name == '__main__' and app.main:\n return '.'.join([app.main, name])\n return '.'.join(p for p in (module_name, name) if p)\n\n\ndef load_extension_class_names(namespace):\n for ep in entry_points().get(namespace, []):\n yield ep.name, ep.value\n\n\ndef load_extension_classes(namespace):\n for name, class_name in load_extension_class_names(namespace):\n try:\n cls = symbol_by_name(class_name)\n except (ImportError, SyntaxError) as exc:\n warnings.warn(\n f'Cannot load {namespace} extension {class_name!r}: {exc!r}')\n else:\n yield name, cls\n", "path": "celery/utils/imports.py"}, {"content": "\"\"\"Celery Command Line Interface.\"\"\"\nimport os\nimport pathlib\nimport traceback\n\ntry:\n from importlib.metadata import entry_points\nexcept ImportError:\n from importlib_metadata import entry_points\n\nimport click\nimport click.exceptions\nfrom click.types import ParamType\nfrom click_didyoumean import DYMGroup\nfrom click_plugins import with_plugins\n\nfrom celery import 
VERSION_BANNER\nfrom celery.app.utils import find_app\nfrom celery.bin.amqp import amqp\nfrom celery.bin.base import CeleryCommand, CeleryOption, CLIContext\nfrom celery.bin.beat import beat\nfrom celery.bin.call import call\nfrom celery.bin.control import control, inspect, status\nfrom celery.bin.events import events\nfrom celery.bin.graph import graph\nfrom celery.bin.list import list_\nfrom celery.bin.logtool import logtool\nfrom celery.bin.migrate import migrate\nfrom celery.bin.multi import multi\nfrom celery.bin.purge import purge\nfrom celery.bin.result import result\nfrom celery.bin.shell import shell\nfrom celery.bin.upgrade import upgrade\nfrom celery.bin.worker import worker\n\nUNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(\"\"\"\nUnable to load celery application.\nThe module {0} was not found.\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(\"\"\"\nUnable to load celery application.\nWhile trying to load the module {0} the following error occurred:\n{1}\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_APP_MISSING = click.style(\"\"\"\nUnable to load celery application.\n{0}\"\"\")\n\n\nclass App(ParamType):\n \"\"\"Application option.\"\"\"\n\n name = \"application\"\n\n def convert(self, value, param, ctx):\n try:\n return find_app(value)\n except ModuleNotFoundError as e:\n if e.name != value:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))\n except AttributeError as e:\n attribute_name = e.args[0].capitalize()\n self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))\n except Exception:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n\n\nAPP = App()\n\n\n@with_plugins(entry_points().get('celery.commands', []))\[email protected](cls=DYMGroup, invoke_without_command=True)\[email protected]('-A',\n '--app',\n envvar='APP',\n cls=CeleryOption,\n type=APP,\n help_group=\"Global Options\")\[email protected]('-b',\n '--broker',\n envvar='BROKER_URL',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--result-backend',\n envvar='RESULT_BACKEND',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--loader',\n envvar='LOADER',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--config',\n envvar='CONFIG_MODULE',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--workdir',\n cls=CeleryOption,\n type=pathlib.Path,\n callback=lambda _, __, wd: os.chdir(wd) if wd else None,\n is_eager=True,\n help_group=\"Global Options\")\[email protected]('-C',\n '--no-color',\n envvar='NO_COLOR',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('-q',\n '--quiet',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--version',\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Global Options\")\[email protected]_context\ndef celery(ctx, app, broker, result_backend, loader, config, workdir,\n no_color, quiet, version):\n \"\"\"Celery command entrypoint.\"\"\"\n if version:\n click.echo(VERSION_BANNER)\n ctx.exit()\n elif ctx.invoked_subcommand is None:\n click.echo(ctx.get_help())\n ctx.exit()\n\n if loader:\n # Default app takes loader from this env (Issue #1066).\n os.environ['CELERY_LOADER'] = loader\n if broker:\n os.environ['CELERY_BROKER_URL'] = broker\n if result_backend:\n os.environ['CELERY_RESULT_BACKEND'] = result_backend\n if config:\n 
os.environ['CELERY_CONFIG_MODULE'] = config\n ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,\n quiet=quiet)\n\n # User options\n worker.params.extend(ctx.obj.app.user_options.get('worker', []))\n beat.params.extend(ctx.obj.app.user_options.get('beat', []))\n events.params.extend(ctx.obj.app.user_options.get('events', []))\n\n for command in celery.commands.values():\n command.params.extend(ctx.obj.app.user_options.get('preload', []))\n\n\[email protected](cls=CeleryCommand)\[email protected]_context\ndef report(ctx):\n \"\"\"Shows information useful to include in bug-reports.\"\"\"\n app = ctx.obj.app\n app.loader.import_default_modules()\n ctx.obj.echo(app.bugreport())\n\n\ncelery.add_command(purge)\ncelery.add_command(call)\ncelery.add_command(beat)\ncelery.add_command(list_)\ncelery.add_command(result)\ncelery.add_command(migrate)\ncelery.add_command(status)\ncelery.add_command(worker)\ncelery.add_command(events)\ncelery.add_command(inspect)\ncelery.add_command(control)\ncelery.add_command(graph)\ncelery.add_command(upgrade)\ncelery.add_command(logtool)\ncelery.add_command(amqp)\ncelery.add_command(shell)\ncelery.add_command(multi)\n\n# Monkey-patch click to display a custom error\n# when -A or --app are used as sub-command options instead of as options\n# of the global command.\n\nprevious_show_implementation = click.exceptions.NoSuchOption.show\n\nWRONG_APP_OPTION_USAGE_MESSAGE = \"\"\"You are using `{option_name}` as an option of the {info_name} sub-command:\ncelery {info_name} {option_name} celeryapp <...>\n\nThe support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:\ncelery {option_name} celeryapp {info_name} <...>\"\"\"\n\n\ndef _show(self, file=None):\n if self.option_name in ('-A', '--app'):\n self.ctx.obj.error(\n WRONG_APP_OPTION_USAGE_MESSAGE.format(\n option_name=self.option_name,\n info_name=self.ctx.info_name),\n fg='red'\n )\n previous_show_implementation(self, file=file)\n\n\nclick.exceptions.NoSuchOption.show = _show\n\n\ndef main() -> int:\n \"\"\"Start celery umbrella command.\n\n This function is the main entrypoint for the CLI.\n\n :return: The exit code of the CLI.\n \"\"\"\n return celery(auto_envvar_prefix=\"CELERY\")\n", "path": "celery/bin/celery.py"}], "after_files": [{"content": "\"\"\"Utilities related to importing modules and symbols by name.\"\"\"\nimport os\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module, reload\n\ntry:\n from importlib.metadata import entry_points\nexcept ImportError:\n from importlib_metadata import entry_points\n\nfrom kombu.utils.imports import symbol_by_name\n\n#: Billiard sets this when execv is enabled.\n#: We use it to find out the name of the original ``__main__``\n#: module, so that we can properly rewrite the name of the\n#: task to be that of ``App.main``.\nMP_MAIN_FILE = os.environ.get('MP_MAIN_FILE')\n\n__all__ = (\n 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name',\n 'cwd_in_path', 'find_module', 'import_from_cwd',\n 'reload_from_cwd', 'module_file', 'gen_task_name',\n)\n\n\nclass NotAPackage(Exception):\n \"\"\"Raised when importing a package, but it's not a package.\"\"\"\n\n\ndef qualname(obj):\n \"\"\"Return object name.\"\"\"\n if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):\n obj = obj.__class__\n q = getattr(obj, '__qualname__', None)\n if '.' 
not in q:\n q = '.'.join((obj.__module__, q))\n return q\n\n\ndef instantiate(name, *args, **kwargs):\n \"\"\"Instantiate class by name.\n\n See Also:\n :func:`symbol_by_name`.\n \"\"\"\n return symbol_by_name(name)(*args, **kwargs)\n\n\n@contextmanager\ndef cwd_in_path():\n \"\"\"Context adding the current working directory to sys.path.\"\"\"\n cwd = os.getcwd()\n if cwd in sys.path:\n yield\n else:\n sys.path.insert(0, cwd)\n try:\n yield cwd\n finally:\n try:\n sys.path.remove(cwd)\n except ValueError: # pragma: no cover\n pass\n\n\ndef find_module(module, path=None, imp=None):\n \"\"\"Version of :func:`imp.find_module` supporting dots.\"\"\"\n if imp is None:\n imp = import_module\n with cwd_in_path():\n try:\n return imp(module)\n except ImportError:\n # Raise a more specific error if the problem is that one of the\n # dot-separated segments of the module name is not a package.\n if '.' in module:\n parts = module.split('.')\n for i, part in enumerate(parts[:-1]):\n package = '.'.join(parts[:i + 1])\n try:\n mpart = imp(package)\n except ImportError:\n # Break out and re-raise the original ImportError\n # instead.\n break\n try:\n mpart.__path__\n except AttributeError:\n raise NotAPackage(package)\n raise\n\n\ndef import_from_cwd(module, imp=None, package=None):\n \"\"\"Import module, temporarily including modules in the current directory.\n\n Modules located in the current directory has\n precedence over modules located in `sys.path`.\n \"\"\"\n if imp is None:\n imp = import_module\n with cwd_in_path():\n return imp(module, package=package)\n\n\ndef reload_from_cwd(module, reloader=None):\n \"\"\"Reload module (ensuring that CWD is in sys.path).\"\"\"\n if reloader is None:\n reloader = reload\n with cwd_in_path():\n return reloader(module)\n\n\ndef module_file(module):\n \"\"\"Return the correct original file name of a module.\"\"\"\n name = module.__file__\n return name[:-1] if name.endswith('.pyc') else name\n\n\ndef gen_task_name(app, name, module_name):\n \"\"\"Generate task name from name/module pair.\"\"\"\n module_name = module_name or '__main__'\n try:\n module = sys.modules[module_name]\n except KeyError:\n # Fix for manage.py shell_plus (Issue #366)\n module = None\n\n if module is not None:\n module_name = module.__name__\n # - If the task module is used as the __main__ script\n # - we need to rewrite the module part of the task name\n # - to match App.main.\n if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:\n # - see comment about :envvar:`MP_MAIN_FILE` above.\n module_name = '__main__'\n if module_name == '__main__' and app.main:\n return '.'.join([app.main, name])\n return '.'.join(p for p in (module_name, name) if p)\n\n\ndef load_extension_class_names(namespace):\n if sys.version_info >= (3, 10):\n _entry_points = entry_points(group=namespace)\n else:\n try:\n _entry_points = entry_points().get(namespace, [])\n except AttributeError:\n _entry_points = entry_points().select(group=namespace)\n for ep in _entry_points:\n yield ep.name, ep.value\n\n\ndef load_extension_classes(namespace):\n for name, class_name in load_extension_class_names(namespace):\n try:\n cls = symbol_by_name(class_name)\n except (ImportError, SyntaxError) as exc:\n warnings.warn(\n f'Cannot load {namespace} extension {class_name!r}: {exc!r}')\n else:\n yield name, cls\n", "path": "celery/utils/imports.py"}, {"content": "\"\"\"Celery Command Line Interface.\"\"\"\nimport os\nimport pathlib\nimport sys\nimport traceback\n\ntry:\n from importlib.metadata import entry_points\nexcept 
ImportError:\n from importlib_metadata import entry_points\n\nimport click\nimport click.exceptions\nfrom click.types import ParamType\nfrom click_didyoumean import DYMGroup\nfrom click_plugins import with_plugins\n\nfrom celery import VERSION_BANNER\nfrom celery.app.utils import find_app\nfrom celery.bin.amqp import amqp\nfrom celery.bin.base import CeleryCommand, CeleryOption, CLIContext\nfrom celery.bin.beat import beat\nfrom celery.bin.call import call\nfrom celery.bin.control import control, inspect, status\nfrom celery.bin.events import events\nfrom celery.bin.graph import graph\nfrom celery.bin.list import list_\nfrom celery.bin.logtool import logtool\nfrom celery.bin.migrate import migrate\nfrom celery.bin.multi import multi\nfrom celery.bin.purge import purge\nfrom celery.bin.result import result\nfrom celery.bin.shell import shell\nfrom celery.bin.upgrade import upgrade\nfrom celery.bin.worker import worker\n\nUNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(\"\"\"\nUnable to load celery application.\nThe module {0} was not found.\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(\"\"\"\nUnable to load celery application.\nWhile trying to load the module {0} the following error occurred:\n{1}\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_APP_MISSING = click.style(\"\"\"\nUnable to load celery application.\n{0}\"\"\")\n\n\nclass App(ParamType):\n \"\"\"Application option.\"\"\"\n\n name = \"application\"\n\n def convert(self, value, param, ctx):\n try:\n return find_app(value)\n except ModuleNotFoundError as e:\n if e.name != value:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))\n except AttributeError as e:\n attribute_name = e.args[0].capitalize()\n self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))\n except Exception:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n\n\nAPP = App()\n\n\nif sys.version_info >= (3, 10):\n _PLUGINS = entry_points(group='celery.commands')\nelse:\n try:\n _PLUGINS = entry_points().get('celery.commands', [])\n except AttributeError:\n _PLUGINS = entry_points().select(group='celery.commands')\n\n\n@with_plugins(_PLUGINS)\[email protected](cls=DYMGroup, invoke_without_command=True)\[email protected]('-A',\n '--app',\n envvar='APP',\n cls=CeleryOption,\n type=APP,\n help_group=\"Global Options\")\[email protected]('-b',\n '--broker',\n envvar='BROKER_URL',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--result-backend',\n envvar='RESULT_BACKEND',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--loader',\n envvar='LOADER',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--config',\n envvar='CONFIG_MODULE',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--workdir',\n cls=CeleryOption,\n type=pathlib.Path,\n callback=lambda _, __, wd: os.chdir(wd) if wd else None,\n is_eager=True,\n help_group=\"Global Options\")\[email protected]('-C',\n '--no-color',\n envvar='NO_COLOR',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('-q',\n '--quiet',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--version',\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Global Options\")\[email protected]_context\ndef celery(ctx, app, broker, result_backend, loader, config, workdir,\n no_color, quiet, 
version):\n \"\"\"Celery command entrypoint.\"\"\"\n if version:\n click.echo(VERSION_BANNER)\n ctx.exit()\n elif ctx.invoked_subcommand is None:\n click.echo(ctx.get_help())\n ctx.exit()\n\n if loader:\n # Default app takes loader from this env (Issue #1066).\n os.environ['CELERY_LOADER'] = loader\n if broker:\n os.environ['CELERY_BROKER_URL'] = broker\n if result_backend:\n os.environ['CELERY_RESULT_BACKEND'] = result_backend\n if config:\n os.environ['CELERY_CONFIG_MODULE'] = config\n ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,\n quiet=quiet)\n\n # User options\n worker.params.extend(ctx.obj.app.user_options.get('worker', []))\n beat.params.extend(ctx.obj.app.user_options.get('beat', []))\n events.params.extend(ctx.obj.app.user_options.get('events', []))\n\n for command in celery.commands.values():\n command.params.extend(ctx.obj.app.user_options.get('preload', []))\n\n\[email protected](cls=CeleryCommand)\[email protected]_context\ndef report(ctx):\n \"\"\"Shows information useful to include in bug-reports.\"\"\"\n app = ctx.obj.app\n app.loader.import_default_modules()\n ctx.obj.echo(app.bugreport())\n\n\ncelery.add_command(purge)\ncelery.add_command(call)\ncelery.add_command(beat)\ncelery.add_command(list_)\ncelery.add_command(result)\ncelery.add_command(migrate)\ncelery.add_command(status)\ncelery.add_command(worker)\ncelery.add_command(events)\ncelery.add_command(inspect)\ncelery.add_command(control)\ncelery.add_command(graph)\ncelery.add_command(upgrade)\ncelery.add_command(logtool)\ncelery.add_command(amqp)\ncelery.add_command(shell)\ncelery.add_command(multi)\n\n# Monkey-patch click to display a custom error\n# when -A or --app are used as sub-command options instead of as options\n# of the global command.\n\nprevious_show_implementation = click.exceptions.NoSuchOption.show\n\nWRONG_APP_OPTION_USAGE_MESSAGE = \"\"\"You are using `{option_name}` as an option of the {info_name} sub-command:\ncelery {info_name} {option_name} celeryapp <...>\n\nThe support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:\ncelery {option_name} celeryapp {info_name} <...>\"\"\"\n\n\ndef _show(self, file=None):\n if self.option_name in ('-A', '--app'):\n self.ctx.obj.error(\n WRONG_APP_OPTION_USAGE_MESSAGE.format(\n option_name=self.option_name,\n info_name=self.ctx.info_name),\n fg='red'\n )\n previous_show_implementation(self, file=file)\n\n\nclick.exceptions.NoSuchOption.show = _show\n\n\ndef main() -> int:\n \"\"\"Start celery umbrella command.\n\n This function is the main entrypoint for the CLI.\n\n :return: The exit code of the CLI.\n \"\"\"\n return celery(auto_envvar_prefix=\"CELERY\")\n", "path": "celery/bin/celery.py"}]}
| 3,918 | 372 |
gh_patches_debug_17077
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-2169
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Extreme Networks SummitStack as port types
<!--
Before opening a new issue, please search through the existing issues to
see if your topic has already been addressed. Note that you may need to
remove the "is:open" filter from the search bar to include closed issues.
Check the appropriate type for your issue below by placing an x between the
brackets. For assistance with installation issues, or for any other issues
other than those listed below, please raise your topic for discussion on
our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please note that issues which do not fall under any of the below categories
will be closed. Due to an excessive backlog of feature requests, we are
not currently accepting any proposals which extend NetBox's feature scope.
Do not prepend any sort of tag to your issue's title. An administrator will
review your issue and assign labels as appropriate.
--->
### Issue type
[X] Feature request <!-- An enhancement of existing functionality -->
[ ] Bug report <!-- Unexpected or erroneous behavior -->
[ ] Documentation <!-- A modification to the documentation -->
<!--
Please describe the environment in which you are running NetBox. (Be sure
to verify that you are running the latest stable release of NetBox before
submitting a bug report.) If you are submitting a bug report and have made
any changes to the code base, please first validate that your bug can be
recreated while running an official release.
-->
### Environment
* Python version: 3.5.2
* NetBox version: 2.3.5
<!--
BUG REPORTS must include:
* A list of the steps needed for someone else to reproduce the bug
* A description of the expected and observed behavior
* Any relevant error messages (screenshots may also help)
FEATURE REQUESTS must include:
* A detailed description of the proposed functionality
* A use case for the new feature
* A rough description of any necessary changes to the database schema
* Any relevant third-party libraries which would be needed
-->
### Description
Adding support for Extreme stacking ports is fairly easy; it just requires updating constants.py to include the SummitStack, SummitStack128, and SummitStack512 port types under stacking.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/dcim/constants.py`
Content:
```
1 from __future__ import unicode_literals
2
3
4 # Rack types
5 RACK_TYPE_2POST = 100
6 RACK_TYPE_4POST = 200
7 RACK_TYPE_CABINET = 300
8 RACK_TYPE_WALLFRAME = 1000
9 RACK_TYPE_WALLCABINET = 1100
10 RACK_TYPE_CHOICES = (
11 (RACK_TYPE_2POST, '2-post frame'),
12 (RACK_TYPE_4POST, '4-post frame'),
13 (RACK_TYPE_CABINET, '4-post cabinet'),
14 (RACK_TYPE_WALLFRAME, 'Wall-mounted frame'),
15 (RACK_TYPE_WALLCABINET, 'Wall-mounted cabinet'),
16 )
17
18 # Rack widths
19 RACK_WIDTH_19IN = 19
20 RACK_WIDTH_23IN = 23
21 RACK_WIDTH_CHOICES = (
22 (RACK_WIDTH_19IN, '19 inches'),
23 (RACK_WIDTH_23IN, '23 inches'),
24 )
25
26 # Rack faces
27 RACK_FACE_FRONT = 0
28 RACK_FACE_REAR = 1
29 RACK_FACE_CHOICES = [
30 [RACK_FACE_FRONT, 'Front'],
31 [RACK_FACE_REAR, 'Rear'],
32 ]
33
34 # Parent/child device roles
35 SUBDEVICE_ROLE_PARENT = True
36 SUBDEVICE_ROLE_CHILD = False
37 SUBDEVICE_ROLE_CHOICES = (
38 (None, 'None'),
39 (SUBDEVICE_ROLE_PARENT, 'Parent'),
40 (SUBDEVICE_ROLE_CHILD, 'Child'),
41 )
42
43 # Interface ordering schemes (for device types)
44 IFACE_ORDERING_POSITION = 1
45 IFACE_ORDERING_NAME = 2
46 IFACE_ORDERING_CHOICES = [
47 [IFACE_ORDERING_POSITION, 'Slot/position'],
48 [IFACE_ORDERING_NAME, 'Name (alphabetically)']
49 ]
50
51 # Interface form factors
52 # Virtual
53 IFACE_FF_VIRTUAL = 0
54 IFACE_FF_LAG = 200
55 # Ethernet
56 IFACE_FF_100ME_FIXED = 800
57 IFACE_FF_1GE_FIXED = 1000
58 IFACE_FF_1GE_GBIC = 1050
59 IFACE_FF_1GE_SFP = 1100
60 IFACE_FF_10GE_FIXED = 1150
61 IFACE_FF_10GE_CX4 = 1170
62 IFACE_FF_10GE_SFP_PLUS = 1200
63 IFACE_FF_10GE_XFP = 1300
64 IFACE_FF_10GE_XENPAK = 1310
65 IFACE_FF_10GE_X2 = 1320
66 IFACE_FF_25GE_SFP28 = 1350
67 IFACE_FF_40GE_QSFP_PLUS = 1400
68 IFACE_FF_100GE_CFP = 1500
69 IFACE_FF_100GE_CFP2 = 1510
70 IFACE_FF_100GE_CFP4 = 1520
71 IFACE_FF_100GE_CPAK = 1550
72 IFACE_FF_100GE_QSFP28 = 1600
73 # Wireless
74 IFACE_FF_80211A = 2600
75 IFACE_FF_80211G = 2610
76 IFACE_FF_80211N = 2620
77 IFACE_FF_80211AC = 2630
78 IFACE_FF_80211AD = 2640
79 # Fibrechannel
80 IFACE_FF_1GFC_SFP = 3010
81 IFACE_FF_2GFC_SFP = 3020
82 IFACE_FF_4GFC_SFP = 3040
83 IFACE_FF_8GFC_SFP_PLUS = 3080
84 IFACE_FF_16GFC_SFP_PLUS = 3160
85 # Serial
86 IFACE_FF_T1 = 4000
87 IFACE_FF_E1 = 4010
88 IFACE_FF_T3 = 4040
89 IFACE_FF_E3 = 4050
90 # Stacking
91 IFACE_FF_STACKWISE = 5000
92 IFACE_FF_STACKWISE_PLUS = 5050
93 IFACE_FF_FLEXSTACK = 5100
94 IFACE_FF_FLEXSTACK_PLUS = 5150
95 IFACE_FF_JUNIPER_VCP = 5200
96 # Other
97 IFACE_FF_OTHER = 32767
98
99 IFACE_FF_CHOICES = [
100 [
101 'Virtual interfaces',
102 [
103 [IFACE_FF_VIRTUAL, 'Virtual'],
104 [IFACE_FF_LAG, 'Link Aggregation Group (LAG)'],
105 ],
106 ],
107 [
108 'Ethernet (fixed)',
109 [
110 [IFACE_FF_100ME_FIXED, '100BASE-TX (10/100ME)'],
111 [IFACE_FF_1GE_FIXED, '1000BASE-T (1GE)'],
112 [IFACE_FF_10GE_FIXED, '10GBASE-T (10GE)'],
113 [IFACE_FF_10GE_CX4, '10GBASE-CX4 (10GE)'],
114 ]
115 ],
116 [
117 'Ethernet (modular)',
118 [
119 [IFACE_FF_1GE_GBIC, 'GBIC (1GE)'],
120 [IFACE_FF_1GE_SFP, 'SFP (1GE)'],
121 [IFACE_FF_10GE_SFP_PLUS, 'SFP+ (10GE)'],
122 [IFACE_FF_10GE_XFP, 'XFP (10GE)'],
123 [IFACE_FF_10GE_XENPAK, 'XENPAK (10GE)'],
124 [IFACE_FF_10GE_X2, 'X2 (10GE)'],
125 [IFACE_FF_25GE_SFP28, 'SFP28 (25GE)'],
126 [IFACE_FF_40GE_QSFP_PLUS, 'QSFP+ (40GE)'],
127 [IFACE_FF_100GE_CFP, 'CFP (100GE)'],
128 [IFACE_FF_100GE_CFP2, 'CFP2 (100GE)'],
129 [IFACE_FF_100GE_CFP4, 'CFP4 (100GE)'],
130 [IFACE_FF_100GE_CPAK, 'Cisco CPAK (100GE)'],
131 [IFACE_FF_100GE_QSFP28, 'QSFP28 (100GE)'],
132 ]
133 ],
134 [
135 'Wireless',
136 [
137 [IFACE_FF_80211A, 'IEEE 802.11a'],
138 [IFACE_FF_80211G, 'IEEE 802.11b/g'],
139 [IFACE_FF_80211N, 'IEEE 802.11n'],
140 [IFACE_FF_80211AC, 'IEEE 802.11ac'],
141 [IFACE_FF_80211AD, 'IEEE 802.11ad'],
142 ]
143 ],
144 [
145 'FibreChannel',
146 [
147 [IFACE_FF_1GFC_SFP, 'SFP (1GFC)'],
148 [IFACE_FF_2GFC_SFP, 'SFP (2GFC)'],
149 [IFACE_FF_4GFC_SFP, 'SFP (4GFC)'],
150 [IFACE_FF_8GFC_SFP_PLUS, 'SFP+ (8GFC)'],
151 [IFACE_FF_16GFC_SFP_PLUS, 'SFP+ (16GFC)'],
152 ]
153 ],
154 [
155 'Serial',
156 [
157 [IFACE_FF_T1, 'T1 (1.544 Mbps)'],
158 [IFACE_FF_E1, 'E1 (2.048 Mbps)'],
159 [IFACE_FF_T3, 'T3 (45 Mbps)'],
160 [IFACE_FF_E3, 'E3 (34 Mbps)'],
161 ]
162 ],
163 [
164 'Stacking',
165 [
166 [IFACE_FF_STACKWISE, 'Cisco StackWise'],
167 [IFACE_FF_STACKWISE_PLUS, 'Cisco StackWise Plus'],
168 [IFACE_FF_FLEXSTACK, 'Cisco FlexStack'],
169 [IFACE_FF_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'],
170 [IFACE_FF_JUNIPER_VCP, 'Juniper VCP'],
171 ]
172 ],
173 [
174 'Other',
175 [
176 [IFACE_FF_OTHER, 'Other'],
177 ]
178 ],
179 ]
180
181 VIRTUAL_IFACE_TYPES = [
182 IFACE_FF_VIRTUAL,
183 IFACE_FF_LAG,
184 ]
185
186 WIRELESS_IFACE_TYPES = [
187 IFACE_FF_80211A,
188 IFACE_FF_80211G,
189 IFACE_FF_80211N,
190 IFACE_FF_80211AC,
191 IFACE_FF_80211AD,
192 ]
193
194 NONCONNECTABLE_IFACE_TYPES = VIRTUAL_IFACE_TYPES + WIRELESS_IFACE_TYPES
195
196 IFACE_MODE_ACCESS = 100
197 IFACE_MODE_TAGGED = 200
198 IFACE_MODE_TAGGED_ALL = 300
199 IFACE_MODE_CHOICES = [
200 [IFACE_MODE_ACCESS, 'Access'],
201 [IFACE_MODE_TAGGED, 'Tagged'],
202 [IFACE_MODE_TAGGED_ALL, 'Tagged All'],
203 ]
204
205 # Device statuses
206 DEVICE_STATUS_OFFLINE = 0
207 DEVICE_STATUS_ACTIVE = 1
208 DEVICE_STATUS_PLANNED = 2
209 DEVICE_STATUS_STAGED = 3
210 DEVICE_STATUS_FAILED = 4
211 DEVICE_STATUS_INVENTORY = 5
212 DEVICE_STATUS_CHOICES = [
213 [DEVICE_STATUS_ACTIVE, 'Active'],
214 [DEVICE_STATUS_OFFLINE, 'Offline'],
215 [DEVICE_STATUS_PLANNED, 'Planned'],
216 [DEVICE_STATUS_STAGED, 'Staged'],
217 [DEVICE_STATUS_FAILED, 'Failed'],
218 [DEVICE_STATUS_INVENTORY, 'Inventory'],
219 ]
220
221 # Site statuses
222 SITE_STATUS_ACTIVE = 1
223 SITE_STATUS_PLANNED = 2
224 SITE_STATUS_RETIRED = 4
225 SITE_STATUS_CHOICES = [
226 [SITE_STATUS_ACTIVE, 'Active'],
227 [SITE_STATUS_PLANNED, 'Planned'],
228 [SITE_STATUS_RETIRED, 'Retired'],
229 ]
230
231 # Bootstrap CSS classes for device statuses
232 STATUS_CLASSES = {
233 0: 'warning',
234 1: 'success',
235 2: 'info',
236 3: 'primary',
237 4: 'danger',
238 5: 'default',
239 }
240
241 # Console/power/interface connection statuses
242 CONNECTION_STATUS_PLANNED = False
243 CONNECTION_STATUS_CONNECTED = True
244 CONNECTION_STATUS_CHOICES = [
245 [CONNECTION_STATUS_PLANNED, 'Planned'],
246 [CONNECTION_STATUS_CONNECTED, 'Connected'],
247 ]
248
249 # Platform -> RPC client mappings
250 RPC_CLIENT_JUNIPER_JUNOS = 'juniper-junos'
251 RPC_CLIENT_CISCO_IOS = 'cisco-ios'
252 RPC_CLIENT_OPENGEAR = 'opengear'
253 RPC_CLIENT_CHOICES = [
254 [RPC_CLIENT_JUNIPER_JUNOS, 'Juniper Junos (NETCONF)'],
255 [RPC_CLIENT_CISCO_IOS, 'Cisco IOS (SSH)'],
256 [RPC_CLIENT_OPENGEAR, 'Opengear (SSH)'],
257 ]
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/dcim/constants.py b/netbox/dcim/constants.py
--- a/netbox/dcim/constants.py
+++ b/netbox/dcim/constants.py
@@ -93,6 +93,11 @@
IFACE_FF_FLEXSTACK = 5100
IFACE_FF_FLEXSTACK_PLUS = 5150
IFACE_FF_JUNIPER_VCP = 5200
+IFACE_FF_EXTREME_SS = 5300
+IFACE_FF_EXTREME_SS128 = 5310
+IFACE_FF_EXTREME_SS256 = 5320
+IFACE_FF_EXTREME_SS512 = 5330
+
# Other
IFACE_FF_OTHER = 32767
@@ -168,6 +173,10 @@
[IFACE_FF_FLEXSTACK, 'Cisco FlexStack'],
[IFACE_FF_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'],
[IFACE_FF_JUNIPER_VCP, 'Juniper VCP'],
+ [IFACE_FF_EXTREME_SS, 'Extreme SummitStack'],
+ [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-128'],
+ [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-256'],
+ [IFACE_FF_EXTREME_SS512, 'Extreme SummitStack-512'],
]
],
[
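Not part of the patch above: a standalone way to sanity-check a Django-style grouped choices list such as the one being extended here. Run against the new stacking entries, a check like this would flag that the 'Extreme SummitStack-256' label appears to reuse `IFACE_FF_EXTREME_SS128` rather than the newly added `IFACE_FF_EXTREME_SS256`.

```python
from collections import Counter


def flatten_grouped_choices(grouped):
    """Yield (value, label) pairs from [[group_label, [[value, label], ...]], ...]."""
    for _group_label, entries in grouped:
        for value, label in entries:
            yield value, label


def duplicated_values(grouped):
    counts = Counter(value for value, _label in flatten_grouped_choices(grouped))
    return sorted(value for value, n in counts.items() if n > 1)


# Tiny stand-in structure mirroring the shape of IFACE_FF_CHOICES:
SAMPLE_CHOICES = [
    ['Stacking', [
        [5300, 'Extreme SummitStack'],
        [5310, 'Extreme SummitStack-128'],
        [5310, 'Extreme SummitStack-256'],  # value reused by mistake
        [5330, 'Extreme SummitStack-512'],
    ]],
]
print(duplicated_values(SAMPLE_CHOICES))  # -> [5310]
```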
|
{"golden_diff": "diff --git a/netbox/dcim/constants.py b/netbox/dcim/constants.py\n--- a/netbox/dcim/constants.py\n+++ b/netbox/dcim/constants.py\n@@ -93,6 +93,11 @@\n IFACE_FF_FLEXSTACK = 5100\n IFACE_FF_FLEXSTACK_PLUS = 5150\n IFACE_FF_JUNIPER_VCP = 5200\n+IFACE_FF_EXTREME_SS = 5300\n+IFACE_FF_EXTREME_SS128 = 5310\n+IFACE_FF_EXTREME_SS256 = 5320\n+IFACE_FF_EXTREME_SS512 = 5330\n+\n # Other\n IFACE_FF_OTHER = 32767\n \n@@ -168,6 +173,10 @@\n [IFACE_FF_FLEXSTACK, 'Cisco FlexStack'],\n [IFACE_FF_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'],\n [IFACE_FF_JUNIPER_VCP, 'Juniper VCP'],\n+ [IFACE_FF_EXTREME_SS, 'Extreme SummitStack'],\n+ [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-128'],\n+ [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-256'],\n+ [IFACE_FF_EXTREME_SS512, 'Extreme SummitStack-512'],\n ]\n ],\n [\n", "issue": "Add Extreme Networks SummitStack as port types\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[X] Feature request <!-- An enhancement of existing functionality -->\r\n[ ] Bug report <!-- Unexpected or erroneous behavior -->\r\n[ ] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: 3.5.2\r\n* NetBox version: 2.3.5\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\nAdding support for Extreme stacking ports is fairly easy, just requires updating the constants.py to include the SummitStack, SummitStack128, and SummitStack512 port types under stacking.\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\n\n# Rack types\nRACK_TYPE_2POST = 100\nRACK_TYPE_4POST = 200\nRACK_TYPE_CABINET = 300\nRACK_TYPE_WALLFRAME = 1000\nRACK_TYPE_WALLCABINET = 1100\nRACK_TYPE_CHOICES = (\n (RACK_TYPE_2POST, '2-post frame'),\n (RACK_TYPE_4POST, '4-post frame'),\n (RACK_TYPE_CABINET, '4-post cabinet'),\n (RACK_TYPE_WALLFRAME, 'Wall-mounted frame'),\n (RACK_TYPE_WALLCABINET, 'Wall-mounted cabinet'),\n)\n\n# Rack widths\nRACK_WIDTH_19IN = 19\nRACK_WIDTH_23IN = 23\nRACK_WIDTH_CHOICES = (\n (RACK_WIDTH_19IN, '19 inches'),\n (RACK_WIDTH_23IN, '23 inches'),\n)\n\n# Rack faces\nRACK_FACE_FRONT = 0\nRACK_FACE_REAR = 1\nRACK_FACE_CHOICES = [\n [RACK_FACE_FRONT, 'Front'],\n [RACK_FACE_REAR, 'Rear'],\n]\n\n# Parent/child device roles\nSUBDEVICE_ROLE_PARENT = True\nSUBDEVICE_ROLE_CHILD = False\nSUBDEVICE_ROLE_CHOICES = (\n (None, 'None'),\n (SUBDEVICE_ROLE_PARENT, 'Parent'),\n (SUBDEVICE_ROLE_CHILD, 'Child'),\n)\n\n# Interface ordering schemes (for device types)\nIFACE_ORDERING_POSITION = 1\nIFACE_ORDERING_NAME = 2\nIFACE_ORDERING_CHOICES = [\n [IFACE_ORDERING_POSITION, 'Slot/position'],\n [IFACE_ORDERING_NAME, 'Name (alphabetically)']\n]\n\n# Interface form factors\n# Virtual\nIFACE_FF_VIRTUAL = 0\nIFACE_FF_LAG = 200\n# Ethernet\nIFACE_FF_100ME_FIXED = 800\nIFACE_FF_1GE_FIXED = 1000\nIFACE_FF_1GE_GBIC = 1050\nIFACE_FF_1GE_SFP = 1100\nIFACE_FF_10GE_FIXED = 1150\nIFACE_FF_10GE_CX4 = 1170\nIFACE_FF_10GE_SFP_PLUS = 1200\nIFACE_FF_10GE_XFP = 1300\nIFACE_FF_10GE_XENPAK = 1310\nIFACE_FF_10GE_X2 = 1320\nIFACE_FF_25GE_SFP28 = 1350\nIFACE_FF_40GE_QSFP_PLUS = 1400\nIFACE_FF_100GE_CFP = 1500\nIFACE_FF_100GE_CFP2 = 1510\nIFACE_FF_100GE_CFP4 = 1520\nIFACE_FF_100GE_CPAK = 1550\nIFACE_FF_100GE_QSFP28 = 1600\n# Wireless\nIFACE_FF_80211A = 2600\nIFACE_FF_80211G = 2610\nIFACE_FF_80211N = 2620\nIFACE_FF_80211AC = 2630\nIFACE_FF_80211AD = 2640\n# Fibrechannel\nIFACE_FF_1GFC_SFP = 3010\nIFACE_FF_2GFC_SFP = 3020\nIFACE_FF_4GFC_SFP = 3040\nIFACE_FF_8GFC_SFP_PLUS = 3080\nIFACE_FF_16GFC_SFP_PLUS = 3160\n# Serial\nIFACE_FF_T1 = 4000\nIFACE_FF_E1 = 4010\nIFACE_FF_T3 = 4040\nIFACE_FF_E3 = 4050\n# Stacking\nIFACE_FF_STACKWISE = 5000\nIFACE_FF_STACKWISE_PLUS = 5050\nIFACE_FF_FLEXSTACK = 5100\nIFACE_FF_FLEXSTACK_PLUS = 5150\nIFACE_FF_JUNIPER_VCP = 5200\n# Other\nIFACE_FF_OTHER = 32767\n\nIFACE_FF_CHOICES = [\n [\n 'Virtual interfaces',\n [\n [IFACE_FF_VIRTUAL, 'Virtual'],\n [IFACE_FF_LAG, 'Link Aggregation Group (LAG)'],\n ],\n ],\n [\n 'Ethernet (fixed)',\n [\n [IFACE_FF_100ME_FIXED, '100BASE-TX (10/100ME)'],\n 
[IFACE_FF_1GE_FIXED, '1000BASE-T (1GE)'],\n [IFACE_FF_10GE_FIXED, '10GBASE-T (10GE)'],\n [IFACE_FF_10GE_CX4, '10GBASE-CX4 (10GE)'],\n ]\n ],\n [\n 'Ethernet (modular)',\n [\n [IFACE_FF_1GE_GBIC, 'GBIC (1GE)'],\n [IFACE_FF_1GE_SFP, 'SFP (1GE)'],\n [IFACE_FF_10GE_SFP_PLUS, 'SFP+ (10GE)'],\n [IFACE_FF_10GE_XFP, 'XFP (10GE)'],\n [IFACE_FF_10GE_XENPAK, 'XENPAK (10GE)'],\n [IFACE_FF_10GE_X2, 'X2 (10GE)'],\n [IFACE_FF_25GE_SFP28, 'SFP28 (25GE)'],\n [IFACE_FF_40GE_QSFP_PLUS, 'QSFP+ (40GE)'],\n [IFACE_FF_100GE_CFP, 'CFP (100GE)'],\n [IFACE_FF_100GE_CFP2, 'CFP2 (100GE)'],\n [IFACE_FF_100GE_CFP4, 'CFP4 (100GE)'],\n [IFACE_FF_100GE_CPAK, 'Cisco CPAK (100GE)'],\n [IFACE_FF_100GE_QSFP28, 'QSFP28 (100GE)'],\n ]\n ],\n [\n 'Wireless',\n [\n [IFACE_FF_80211A, 'IEEE 802.11a'],\n [IFACE_FF_80211G, 'IEEE 802.11b/g'],\n [IFACE_FF_80211N, 'IEEE 802.11n'],\n [IFACE_FF_80211AC, 'IEEE 802.11ac'],\n [IFACE_FF_80211AD, 'IEEE 802.11ad'],\n ]\n ],\n [\n 'FibreChannel',\n [\n [IFACE_FF_1GFC_SFP, 'SFP (1GFC)'],\n [IFACE_FF_2GFC_SFP, 'SFP (2GFC)'],\n [IFACE_FF_4GFC_SFP, 'SFP (4GFC)'],\n [IFACE_FF_8GFC_SFP_PLUS, 'SFP+ (8GFC)'],\n [IFACE_FF_16GFC_SFP_PLUS, 'SFP+ (16GFC)'],\n ]\n ],\n [\n 'Serial',\n [\n [IFACE_FF_T1, 'T1 (1.544 Mbps)'],\n [IFACE_FF_E1, 'E1 (2.048 Mbps)'],\n [IFACE_FF_T3, 'T3 (45 Mbps)'],\n [IFACE_FF_E3, 'E3 (34 Mbps)'],\n ]\n ],\n [\n 'Stacking',\n [\n [IFACE_FF_STACKWISE, 'Cisco StackWise'],\n [IFACE_FF_STACKWISE_PLUS, 'Cisco StackWise Plus'],\n [IFACE_FF_FLEXSTACK, 'Cisco FlexStack'],\n [IFACE_FF_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'],\n [IFACE_FF_JUNIPER_VCP, 'Juniper VCP'],\n ]\n ],\n [\n 'Other',\n [\n [IFACE_FF_OTHER, 'Other'],\n ]\n ],\n]\n\nVIRTUAL_IFACE_TYPES = [\n IFACE_FF_VIRTUAL,\n IFACE_FF_LAG,\n]\n\nWIRELESS_IFACE_TYPES = [\n IFACE_FF_80211A,\n IFACE_FF_80211G,\n IFACE_FF_80211N,\n IFACE_FF_80211AC,\n IFACE_FF_80211AD,\n]\n\nNONCONNECTABLE_IFACE_TYPES = VIRTUAL_IFACE_TYPES + WIRELESS_IFACE_TYPES\n\nIFACE_MODE_ACCESS = 100\nIFACE_MODE_TAGGED = 200\nIFACE_MODE_TAGGED_ALL = 300\nIFACE_MODE_CHOICES = [\n [IFACE_MODE_ACCESS, 'Access'],\n [IFACE_MODE_TAGGED, 'Tagged'],\n [IFACE_MODE_TAGGED_ALL, 'Tagged All'],\n]\n\n# Device statuses\nDEVICE_STATUS_OFFLINE = 0\nDEVICE_STATUS_ACTIVE = 1\nDEVICE_STATUS_PLANNED = 2\nDEVICE_STATUS_STAGED = 3\nDEVICE_STATUS_FAILED = 4\nDEVICE_STATUS_INVENTORY = 5\nDEVICE_STATUS_CHOICES = [\n [DEVICE_STATUS_ACTIVE, 'Active'],\n [DEVICE_STATUS_OFFLINE, 'Offline'],\n [DEVICE_STATUS_PLANNED, 'Planned'],\n [DEVICE_STATUS_STAGED, 'Staged'],\n [DEVICE_STATUS_FAILED, 'Failed'],\n [DEVICE_STATUS_INVENTORY, 'Inventory'],\n]\n\n# Site statuses\nSITE_STATUS_ACTIVE = 1\nSITE_STATUS_PLANNED = 2\nSITE_STATUS_RETIRED = 4\nSITE_STATUS_CHOICES = [\n [SITE_STATUS_ACTIVE, 'Active'],\n [SITE_STATUS_PLANNED, 'Planned'],\n [SITE_STATUS_RETIRED, 'Retired'],\n]\n\n# Bootstrap CSS classes for device statuses\nSTATUS_CLASSES = {\n 0: 'warning',\n 1: 'success',\n 2: 'info',\n 3: 'primary',\n 4: 'danger',\n 5: 'default',\n}\n\n# Console/power/interface connection statuses\nCONNECTION_STATUS_PLANNED = False\nCONNECTION_STATUS_CONNECTED = True\nCONNECTION_STATUS_CHOICES = [\n [CONNECTION_STATUS_PLANNED, 'Planned'],\n [CONNECTION_STATUS_CONNECTED, 'Connected'],\n]\n\n# Platform -> RPC client mappings\nRPC_CLIENT_JUNIPER_JUNOS = 'juniper-junos'\nRPC_CLIENT_CISCO_IOS = 'cisco-ios'\nRPC_CLIENT_OPENGEAR = 'opengear'\nRPC_CLIENT_CHOICES = [\n [RPC_CLIENT_JUNIPER_JUNOS, 'Juniper Junos (NETCONF)'],\n [RPC_CLIENT_CISCO_IOS, 'Cisco IOS (SSH)'],\n [RPC_CLIENT_OPENGEAR, 'Opengear (SSH)'],\n]\n", "path": 
"netbox/dcim/constants.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\n\n# Rack types\nRACK_TYPE_2POST = 100\nRACK_TYPE_4POST = 200\nRACK_TYPE_CABINET = 300\nRACK_TYPE_WALLFRAME = 1000\nRACK_TYPE_WALLCABINET = 1100\nRACK_TYPE_CHOICES = (\n (RACK_TYPE_2POST, '2-post frame'),\n (RACK_TYPE_4POST, '4-post frame'),\n (RACK_TYPE_CABINET, '4-post cabinet'),\n (RACK_TYPE_WALLFRAME, 'Wall-mounted frame'),\n (RACK_TYPE_WALLCABINET, 'Wall-mounted cabinet'),\n)\n\n# Rack widths\nRACK_WIDTH_19IN = 19\nRACK_WIDTH_23IN = 23\nRACK_WIDTH_CHOICES = (\n (RACK_WIDTH_19IN, '19 inches'),\n (RACK_WIDTH_23IN, '23 inches'),\n)\n\n# Rack faces\nRACK_FACE_FRONT = 0\nRACK_FACE_REAR = 1\nRACK_FACE_CHOICES = [\n [RACK_FACE_FRONT, 'Front'],\n [RACK_FACE_REAR, 'Rear'],\n]\n\n# Parent/child device roles\nSUBDEVICE_ROLE_PARENT = True\nSUBDEVICE_ROLE_CHILD = False\nSUBDEVICE_ROLE_CHOICES = (\n (None, 'None'),\n (SUBDEVICE_ROLE_PARENT, 'Parent'),\n (SUBDEVICE_ROLE_CHILD, 'Child'),\n)\n\n# Interface ordering schemes (for device types)\nIFACE_ORDERING_POSITION = 1\nIFACE_ORDERING_NAME = 2\nIFACE_ORDERING_CHOICES = [\n [IFACE_ORDERING_POSITION, 'Slot/position'],\n [IFACE_ORDERING_NAME, 'Name (alphabetically)']\n]\n\n# Interface form factors\n# Virtual\nIFACE_FF_VIRTUAL = 0\nIFACE_FF_LAG = 200\n# Ethernet\nIFACE_FF_100ME_FIXED = 800\nIFACE_FF_1GE_FIXED = 1000\nIFACE_FF_1GE_GBIC = 1050\nIFACE_FF_1GE_SFP = 1100\nIFACE_FF_10GE_FIXED = 1150\nIFACE_FF_10GE_CX4 = 1170\nIFACE_FF_10GE_SFP_PLUS = 1200\nIFACE_FF_10GE_XFP = 1300\nIFACE_FF_10GE_XENPAK = 1310\nIFACE_FF_10GE_X2 = 1320\nIFACE_FF_25GE_SFP28 = 1350\nIFACE_FF_40GE_QSFP_PLUS = 1400\nIFACE_FF_100GE_CFP = 1500\nIFACE_FF_100GE_CFP2 = 1510\nIFACE_FF_100GE_CFP4 = 1520\nIFACE_FF_100GE_CPAK = 1550\nIFACE_FF_100GE_QSFP28 = 1600\n# Wireless\nIFACE_FF_80211A = 2600\nIFACE_FF_80211G = 2610\nIFACE_FF_80211N = 2620\nIFACE_FF_80211AC = 2630\nIFACE_FF_80211AD = 2640\n# Fibrechannel\nIFACE_FF_1GFC_SFP = 3010\nIFACE_FF_2GFC_SFP = 3020\nIFACE_FF_4GFC_SFP = 3040\nIFACE_FF_8GFC_SFP_PLUS = 3080\nIFACE_FF_16GFC_SFP_PLUS = 3160\n# Serial\nIFACE_FF_T1 = 4000\nIFACE_FF_E1 = 4010\nIFACE_FF_T3 = 4040\nIFACE_FF_E3 = 4050\n# Stacking\nIFACE_FF_STACKWISE = 5000\nIFACE_FF_STACKWISE_PLUS = 5050\nIFACE_FF_FLEXSTACK = 5100\nIFACE_FF_FLEXSTACK_PLUS = 5150\nIFACE_FF_JUNIPER_VCP = 5200\nIFACE_FF_EXTREME_SS = 5300\nIFACE_FF_EXTREME_SS128 = 5310\nIFACE_FF_EXTREME_SS256 = 5320\nIFACE_FF_EXTREME_SS512 = 5330\n\n# Other\nIFACE_FF_OTHER = 32767\n\nIFACE_FF_CHOICES = [\n [\n 'Virtual interfaces',\n [\n [IFACE_FF_VIRTUAL, 'Virtual'],\n [IFACE_FF_LAG, 'Link Aggregation Group (LAG)'],\n ],\n ],\n [\n 'Ethernet (fixed)',\n [\n [IFACE_FF_100ME_FIXED, '100BASE-TX (10/100ME)'],\n [IFACE_FF_1GE_FIXED, '1000BASE-T (1GE)'],\n [IFACE_FF_10GE_FIXED, '10GBASE-T (10GE)'],\n [IFACE_FF_10GE_CX4, '10GBASE-CX4 (10GE)'],\n ]\n ],\n [\n 'Ethernet (modular)',\n [\n [IFACE_FF_1GE_GBIC, 'GBIC (1GE)'],\n [IFACE_FF_1GE_SFP, 'SFP (1GE)'],\n [IFACE_FF_10GE_SFP_PLUS, 'SFP+ (10GE)'],\n [IFACE_FF_10GE_XFP, 'XFP (10GE)'],\n [IFACE_FF_10GE_XENPAK, 'XENPAK (10GE)'],\n [IFACE_FF_10GE_X2, 'X2 (10GE)'],\n [IFACE_FF_25GE_SFP28, 'SFP28 (25GE)'],\n [IFACE_FF_40GE_QSFP_PLUS, 'QSFP+ (40GE)'],\n [IFACE_FF_100GE_CFP, 'CFP (100GE)'],\n [IFACE_FF_100GE_CFP2, 'CFP2 (100GE)'],\n [IFACE_FF_100GE_CFP4, 'CFP4 (100GE)'],\n [IFACE_FF_100GE_CPAK, 'Cisco CPAK (100GE)'],\n [IFACE_FF_100GE_QSFP28, 'QSFP28 (100GE)'],\n ]\n ],\n [\n 'Wireless',\n [\n [IFACE_FF_80211A, 'IEEE 802.11a'],\n [IFACE_FF_80211G, 'IEEE 802.11b/g'],\n [IFACE_FF_80211N, 'IEEE 
802.11n'],\n [IFACE_FF_80211AC, 'IEEE 802.11ac'],\n [IFACE_FF_80211AD, 'IEEE 802.11ad'],\n ]\n ],\n [\n 'FibreChannel',\n [\n [IFACE_FF_1GFC_SFP, 'SFP (1GFC)'],\n [IFACE_FF_2GFC_SFP, 'SFP (2GFC)'],\n [IFACE_FF_4GFC_SFP, 'SFP (4GFC)'],\n [IFACE_FF_8GFC_SFP_PLUS, 'SFP+ (8GFC)'],\n [IFACE_FF_16GFC_SFP_PLUS, 'SFP+ (16GFC)'],\n ]\n ],\n [\n 'Serial',\n [\n [IFACE_FF_T1, 'T1 (1.544 Mbps)'],\n [IFACE_FF_E1, 'E1 (2.048 Mbps)'],\n [IFACE_FF_T3, 'T3 (45 Mbps)'],\n [IFACE_FF_E3, 'E3 (34 Mbps)'],\n ]\n ],\n [\n 'Stacking',\n [\n [IFACE_FF_STACKWISE, 'Cisco StackWise'],\n [IFACE_FF_STACKWISE_PLUS, 'Cisco StackWise Plus'],\n [IFACE_FF_FLEXSTACK, 'Cisco FlexStack'],\n [IFACE_FF_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'],\n [IFACE_FF_JUNIPER_VCP, 'Juniper VCP'],\n [IFACE_FF_EXTREME_SS, 'Extreme SummitStack'],\n [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-128'],\n [IFACE_FF_EXTREME_SS128, 'Extreme SummitStack-256'],\n [IFACE_FF_EXTREME_SS512, 'Extreme SummitStack-512'],\n ]\n ],\n [\n 'Other',\n [\n [IFACE_FF_OTHER, 'Other'],\n ]\n ],\n]\n\nVIRTUAL_IFACE_TYPES = [\n IFACE_FF_VIRTUAL,\n IFACE_FF_LAG,\n]\n\nWIRELESS_IFACE_TYPES = [\n IFACE_FF_80211A,\n IFACE_FF_80211G,\n IFACE_FF_80211N,\n IFACE_FF_80211AC,\n IFACE_FF_80211AD,\n]\n\nNONCONNECTABLE_IFACE_TYPES = VIRTUAL_IFACE_TYPES + WIRELESS_IFACE_TYPES\n\nIFACE_MODE_ACCESS = 100\nIFACE_MODE_TAGGED = 200\nIFACE_MODE_TAGGED_ALL = 300\nIFACE_MODE_CHOICES = [\n [IFACE_MODE_ACCESS, 'Access'],\n [IFACE_MODE_TAGGED, 'Tagged'],\n [IFACE_MODE_TAGGED_ALL, 'Tagged All'],\n]\n\n# Device statuses\nDEVICE_STATUS_OFFLINE = 0\nDEVICE_STATUS_ACTIVE = 1\nDEVICE_STATUS_PLANNED = 2\nDEVICE_STATUS_STAGED = 3\nDEVICE_STATUS_FAILED = 4\nDEVICE_STATUS_INVENTORY = 5\nDEVICE_STATUS_CHOICES = [\n [DEVICE_STATUS_ACTIVE, 'Active'],\n [DEVICE_STATUS_OFFLINE, 'Offline'],\n [DEVICE_STATUS_PLANNED, 'Planned'],\n [DEVICE_STATUS_STAGED, 'Staged'],\n [DEVICE_STATUS_FAILED, 'Failed'],\n [DEVICE_STATUS_INVENTORY, 'Inventory'],\n]\n\n# Site statuses\nSITE_STATUS_ACTIVE = 1\nSITE_STATUS_PLANNED = 2\nSITE_STATUS_RETIRED = 4\nSITE_STATUS_CHOICES = [\n [SITE_STATUS_ACTIVE, 'Active'],\n [SITE_STATUS_PLANNED, 'Planned'],\n [SITE_STATUS_RETIRED, 'Retired'],\n]\n\n# Bootstrap CSS classes for device statuses\nSTATUS_CLASSES = {\n 0: 'warning',\n 1: 'success',\n 2: 'info',\n 3: 'primary',\n 4: 'danger',\n 5: 'default',\n}\n\n# Console/power/interface connection statuses\nCONNECTION_STATUS_PLANNED = False\nCONNECTION_STATUS_CONNECTED = True\nCONNECTION_STATUS_CHOICES = [\n [CONNECTION_STATUS_PLANNED, 'Planned'],\n [CONNECTION_STATUS_CONNECTED, 'Connected'],\n]\n\n# Platform -> RPC client mappings\nRPC_CLIENT_JUNIPER_JUNOS = 'juniper-junos'\nRPC_CLIENT_CISCO_IOS = 'cisco-ios'\nRPC_CLIENT_OPENGEAR = 'opengear'\nRPC_CLIENT_CHOICES = [\n [RPC_CLIENT_JUNIPER_JUNOS, 'Juniper Junos (NETCONF)'],\n [RPC_CLIENT_CISCO_IOS, 'Cisco IOS (SSH)'],\n [RPC_CLIENT_OPENGEAR, 'Opengear (SSH)'],\n]\n", "path": "netbox/dcim/constants.py"}]}
| 3,940 | 331 |
gh_patches_debug_148
|
rasdani/github-patches
|
git_diff
|
AUTOMATIC1111__stable-diffusion-webui-7583
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: vae does not appear when clicking refresh button in models/VAE
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Pressing the button to update the VAE list does not update the VAE list.
### Steps to reproduce the problem
1. Insert new VAE file to models/VAE
2. Press the Refresh VAE list button
### What should have happened?
The new VAE file should appear in the list
### Commit where the problem happens
Latest
### What platforms do you use to access the UI ?
_No response_
### What browsers do you use to access the UI ?
_No response_
### Command Line Arguments
```Shell
No
```
### List of extensions
No
### Console logs
```Shell
Nothing
```
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/shared_items.py`
Content:
```
1
2
3 def realesrgan_models_names():
4 import modules.realesrgan_model
5 return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
6
7
8 def postprocessing_scripts():
9 import modules.scripts
10
11 return modules.scripts.scripts_postproc.scripts
12
13
14 def sd_vae_items():
15 import modules.sd_vae
16
17 return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
18
19
20 def refresh_vae_list():
21 import modules.sd_vae
22
23 return modules.sd_vae.refresh_vae_list
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modules/shared_items.py b/modules/shared_items.py
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -20,4 +20,4 @@
def refresh_vae_list():
import modules.sd_vae
- return modules.sd_vae.refresh_vae_list
+ return modules.sd_vae.refresh_vae_list()
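The entire fix is the trailing pair of parentheses: the old code returned the `refresh_vae_list` function object instead of calling it, so the refresh never ran when the button was pressed. A minimal illustration of the difference, independent of the webui codebase:

```python
def refresh():
    print("refreshing...")
    return ["vae-a.pt", "vae-b.pt"]


def broken_wrapper():
    return refresh      # hands back the function object; nothing executes


def fixed_wrapper():
    return refresh()    # executes the refresh and returns its result


print(broken_wrapper())  # <function refresh at 0x...> -- the caller gets a function, not data
print(fixed_wrapper())   # prints "refreshing..." then ['vae-a.pt', 'vae-b.pt']
```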
|
{"golden_diff": "diff --git a/modules/shared_items.py b/modules/shared_items.py\n--- a/modules/shared_items.py\n+++ b/modules/shared_items.py\n@@ -20,4 +20,4 @@\n def refresh_vae_list():\r\n import modules.sd_vae\r\n \r\n- return modules.sd_vae.refresh_vae_list\r\n+ return modules.sd_vae.refresh_vae_list()\n", "issue": "[Bug]: vae does not appear when clicking refresh button in models/VAE\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nPressing the button to update the VAE list does not update the VAE list.\n\n### Steps to reproduce the problem\n\n1. Insert new VAE file to models/VAE\r\n2. Press buttion Refresh VAE list \n\n### What should have happened?\n\nApprear new VAE file in list\n\n### Commit where the problem happens\n\nLastest\n\n### What platforms do you use to access the UI ?\n\n_No response_\n\n### What browsers do you use to access the UI ?\n\n_No response_\n\n### Command Line Arguments\n\n```Shell\nNo\n```\n\n\n### List of extensions\n\nNo\n\n### Console logs\n\n```Shell\nNothing\n```\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "\r\n\r\ndef realesrgan_models_names():\r\n import modules.realesrgan_model\r\n return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]\r\n\r\n\r\ndef postprocessing_scripts():\r\n import modules.scripts\r\n\r\n return modules.scripts.scripts_postproc.scripts\r\n\r\n\r\ndef sd_vae_items():\r\n import modules.sd_vae\r\n\r\n return [\"Automatic\", \"None\"] + list(modules.sd_vae.vae_dict)\r\n\r\n\r\ndef refresh_vae_list():\r\n import modules.sd_vae\r\n\r\n return modules.sd_vae.refresh_vae_list\r\n", "path": "modules/shared_items.py"}], "after_files": [{"content": "\r\n\r\ndef realesrgan_models_names():\r\n import modules.realesrgan_model\r\n return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]\r\n\r\n\r\ndef postprocessing_scripts():\r\n import modules.scripts\r\n\r\n return modules.scripts.scripts_postproc.scripts\r\n\r\n\r\ndef sd_vae_items():\r\n import modules.sd_vae\r\n\r\n return [\"Automatic\", \"None\"] + list(modules.sd_vae.vae_dict)\r\n\r\n\r\ndef refresh_vae_list():\r\n import modules.sd_vae\r\n\r\n return modules.sd_vae.refresh_vae_list()\r\n", "path": "modules/shared_items.py"}]}
| 610 | 78 |
gh_patches_debug_42142
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-87
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add way to temporarily/permanently disable hooks.
[overcommit](https://github.com/causes/overcommit) uses environment variables to do temporary skipping...
For instance:
`SKIP=foo git commit` will skip the `foo` hook
Whereas I've used a more permanent switch with `git config hooks.foo false` in the past.
Considering both approaches, I think overcommit does this quite elegantly while focusing on only _temporarily_ disabling hooks.
--- END ISSUE ---
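Not part of the quoted issue or of pre-commit's own code: a rough sketch of how an overcommit-style `SKIP` variable could be parsed. The comma/whitespace handling and the function name are assumptions made for illustration only.

```python
import os


def skipped_hooks(environ=os.environ):
    """Parse a SKIP variable into a set of hook ids.

    Accepts comma- and/or whitespace-separated ids, e.g. SKIP="foo,bar"
    or SKIP="foo bar". Illustrative only; not pre-commit's implementation.
    """
    raw = environ.get('SKIP', '')
    return {part for part in raw.replace(',', ' ').split() if part}


# Example: SKIP="flake8 trailing-whitespace" git commit
print(skipped_hooks({'SKIP': 'flake8, trailing-whitespace'}))
# -> {'flake8', 'trailing-whitespace'}
```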
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands.py`
Content:
```
1 from __future__ import print_function
2
3 import logging
4 import os
5 import pkg_resources
6 import shutil
7 import stat
8 import subprocess
9 import sys
10 from asottile.ordereddict import OrderedDict
11 from asottile.yaml import ordered_dump
12 from asottile.yaml import ordered_load
13 from plumbum import local
14
15 import pre_commit.constants as C
16 from pre_commit import git
17 from pre_commit import color
18 from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
19 from pre_commit.clientlib.validate_config import load_config
20 from pre_commit.jsonschema_extensions import remove_defaults
21 from pre_commit.logging_handler import LoggingHandler
22 from pre_commit.repository import Repository
23 from pre_commit.staged_files_only import staged_files_only
24 from pre_commit.util import noop_context
25
26
27 logger = logging.getLogger('pre_commit')
28
29 COLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])
30
31 PASS_FAIL_LENGTH = 6
32
33
34 def install(runner):
35 """Install the pre-commit hooks."""
36 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
37 with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:
38 pre_commit_file_obj.write(open(pre_commit_file).read())
39
40 original_mode = os.stat(runner.pre_commit_path).st_mode
41 os.chmod(
42 runner.pre_commit_path,
43 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
44 )
45
46 print('pre-commit installed at {0}'.format(runner.pre_commit_path))
47
48 return 0
49
50
51 def uninstall(runner):
52 """Uninstall the pre-commit hooks."""
53 if os.path.exists(runner.pre_commit_path):
54 os.remove(runner.pre_commit_path)
55 print('pre-commit uninstalled')
56 return 0
57
58
59 class RepositoryCannotBeUpdatedError(RuntimeError):
60 pass
61
62
63 def _update_repository(repo_config):
64 """Updates a repository to the tip of `master`. If the repository cannot
65 be updated because a hook that is configured does not exist in `master`,
66 this raises a RepositoryCannotBeUpdatedError
67
68 Args:
69 repo_config - A config for a repository
70 """
71 repo = Repository(repo_config)
72
73 with repo.in_checkout():
74 local['git']['fetch']()
75 head_sha = local['git']['rev-parse', 'origin/master']().strip()
76
77 # Don't bother trying to update if our sha is the same
78 if head_sha == repo_config['sha']:
79 return repo_config
80
81 # Construct a new config with the head sha
82 new_config = OrderedDict(repo_config)
83 new_config['sha'] = head_sha
84 new_repo = Repository(new_config)
85
86 # See if any of our hooks were deleted with the new commits
87 hooks = set(repo.hooks.keys())
88 hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))
89 if hooks_missing:
90 raise RepositoryCannotBeUpdatedError(
91 'Cannot update because the tip of master is missing these hooks:\n'
92 '{0}'.format(', '.join(sorted(hooks_missing)))
93 )
94
95 return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]
96
97
98 def autoupdate(runner):
99 """Auto-update the pre-commit config to the latest versions of repos."""
100 retv = 0
101 output_configs = []
102 changed = False
103
104 input_configs = load_config(
105 runner.config_file_path,
106 load_strategy=ordered_load,
107 )
108
109 for repo_config in input_configs:
110 print('Updating {0}...'.format(repo_config['repo']), end='')
111 try:
112 new_repo_config = _update_repository(repo_config)
113 except RepositoryCannotBeUpdatedError as error:
114 print(error.args[0])
115 output_configs.append(repo_config)
116 retv = 1
117 continue
118
119 if new_repo_config['sha'] != repo_config['sha']:
120 changed = True
121 print(
122 'updating {0} -> {1}.'.format(
123 repo_config['sha'], new_repo_config['sha'],
124 )
125 )
126 output_configs.append(new_repo_config)
127 else:
128 print('already up to date.')
129 output_configs.append(repo_config)
130
131 if changed:
132 with open(runner.config_file_path, 'w') as config_file:
133 config_file.write(
134 ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)
135 )
136
137 return retv
138
139
140 def clean(runner):
141 if os.path.exists(runner.hooks_workspace_path):
142 shutil.rmtree(runner.hooks_workspace_path)
143 print('Cleaned {0}.'.format(runner.hooks_workspace_path))
144 return 0
145
146
147 def _run_single_hook(runner, repository, hook_id, args, write):
148 if args.all_files:
149 get_filenames = git.get_all_files_matching
150 elif git.is_in_merge_conflict():
151 get_filenames = git.get_conflicted_files_matching
152 else:
153 get_filenames = git.get_staged_files_matching
154
155 hook = repository.hooks[hook_id]
156
157 filenames = get_filenames(hook['files'], hook['exclude'])
158 if not filenames:
159 no_files_msg = '(no files to check) '
160 skipped_msg = 'Skipped'
161 write(
162 '{0}{1}{2}{3}\n'.format(
163 hook['name'],
164 '.' * (
165 COLS -
166 len(hook['name']) -
167 len(no_files_msg) -
168 len(skipped_msg) -
169 6
170 ),
171 no_files_msg,
172 color.format_color(skipped_msg, color.TURQUOISE, args.color),
173 )
174 )
175 return 0
176
177 # Print the hook and the dots first in case the hook takes hella long to
178 # run.
179 write(
180 '{0}{1}'.format(
181 hook['name'],
182 '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),
183 ),
184 )
185 sys.stdout.flush()
186
187 retcode, stdout, stderr = repository.run_hook(
188 runner.cmd_runner,
189 hook_id,
190 filenames,
191 )
192
193 if retcode != repository.hooks[hook_id]['expected_return_value']:
194 retcode = 1
195 print_color = color.RED
196 pass_fail = 'Failed'
197 else:
198 retcode = 0
199 print_color = color.GREEN
200 pass_fail = 'Passed'
201
202 write(color.format_color(pass_fail, print_color, args.color) + '\n')
203
204 if (stdout or stderr) and (retcode or args.verbose):
205 write('\n')
206 for output in (stdout, stderr):
207 if output.strip():
208 write(output.strip() + '\n')
209 write('\n')
210
211 return retcode
212
213
214 def _run_hooks(runner, args, write):
215 """Actually run the hooks."""
216 retval = 0
217
218 for repo in runner.repositories:
219 for hook_id in repo.hooks:
220 retval |= _run_single_hook(runner, repo, hook_id, args, write=write)
221
222 return retval
223
224
225 def _run_hook(runner, hook_id, args, write):
226 for repo in runner.repositories:
227 if hook_id in repo.hooks:
228 return _run_single_hook(runner, repo, hook_id, args, write=write)
229 else:
230 write('No hook with id `{0}`\n'.format(hook_id))
231 return 1
232
233
234 def _has_unmerged_paths(runner):
235 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
236 return bool(stdout.strip())
237
238
239 def run(runner, args, write=sys.stdout.write):
240 # Set up our logging handler
241 logger.addHandler(LoggingHandler(args.color, write=write))
242 logger.setLevel(logging.INFO)
243
244 # Check if we have unresolved merge conflict files and fail fast.
245 if _has_unmerged_paths(runner):
246 logger.error('Unmerged files. Resolve before committing.')
247 return 1
248
249 if args.no_stash or args.all_files:
250 ctx = noop_context()
251 else:
252 ctx = staged_files_only(runner.cmd_runner)
253
254 with ctx:
255 if args.hook:
256 return _run_hook(runner, args.hook, args, write=write)
257 else:
258 return _run_hooks(runner, args, write=write)
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands.py b/pre_commit/commands.py
--- a/pre_commit/commands.py
+++ b/pre_commit/commands.py
@@ -144,7 +144,42 @@
return 0
-def _run_single_hook(runner, repository, hook_id, args, write):
+def _get_skips(environ):
+ skips = environ.get('SKIP', '')
+ return set(skip.strip() for skip in skips.split(',') if skip.strip())
+
+
+def _print_no_files_skipped(hook, write, args):
+ no_files_msg = '(no files to check) '
+ skipped_msg = 'Skipped'
+ write(
+ '{0}{1}{2}{3}\n'.format(
+ hook['name'],
+ '.' * (
+ COLS -
+ len(hook['name']) -
+ len(no_files_msg) -
+ len(skipped_msg) -
+ 6
+ ),
+ no_files_msg,
+ color.format_color(skipped_msg, color.TURQUOISE, args.color),
+ )
+ )
+
+
+def _print_user_skipped(hook, write, args):
+ skipped_msg = 'Skipped'
+ write(
+ '{0}{1}{2}\n'.format(
+ hook['name'],
+ '.' * (COLS - len(hook['name']) - len(skipped_msg) - 6),
+ color.format_color(skipped_msg, color.YELLOW, args.color),
+ ),
+ )
+
+
+def _run_single_hook(runner, repository, hook_id, args, write, skips=set()):
if args.all_files:
get_filenames = git.get_all_files_matching
elif git.is_in_merge_conflict():
@@ -155,23 +190,11 @@
hook = repository.hooks[hook_id]
filenames = get_filenames(hook['files'], hook['exclude'])
- if not filenames:
- no_files_msg = '(no files to check) '
- skipped_msg = 'Skipped'
- write(
- '{0}{1}{2}{3}\n'.format(
- hook['name'],
- '.' * (
- COLS -
- len(hook['name']) -
- len(no_files_msg) -
- len(skipped_msg) -
- 6
- ),
- no_files_msg,
- color.format_color(skipped_msg, color.TURQUOISE, args.color),
- )
- )
+ if hook_id in skips:
+ _print_user_skipped(hook, write, args)
+ return 0
+ elif not filenames:
+ _print_no_files_skipped(hook, write, args)
return 0
# Print the hook and the dots first in case the hook takes hella long to
@@ -211,18 +234,23 @@
return retcode
-def _run_hooks(runner, args, write):
+def _run_hooks(runner, args, write, environ):
"""Actually run the hooks."""
retval = 0
+ skips = _get_skips(environ)
+
for repo in runner.repositories:
for hook_id in repo.hooks:
- retval |= _run_single_hook(runner, repo, hook_id, args, write=write)
+ retval |= _run_single_hook(
+ runner, repo, hook_id, args, write, skips=skips,
+ )
return retval
-def _run_hook(runner, hook_id, args, write):
+def _run_hook(runner, args, write):
+ hook_id = args.hook
for repo in runner.repositories:
if hook_id in repo.hooks:
return _run_single_hook(runner, repo, hook_id, args, write=write)
@@ -236,7 +264,7 @@
return bool(stdout.strip())
-def run(runner, args, write=sys.stdout.write):
+def run(runner, args, write=sys.stdout.write, environ=os.environ):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
@@ -253,6 +281,6 @@
with ctx:
if args.hook:
- return _run_hook(runner, args.hook, args, write=write)
+ return _run_hook(runner, args, write=write)
else:
- return _run_hooks(runner, args, write=write)
+ return _run_hooks(runner, args, write=write, environ=environ)
|
{"golden_diff": "diff --git a/pre_commit/commands.py b/pre_commit/commands.py\n--- a/pre_commit/commands.py\n+++ b/pre_commit/commands.py\n@@ -144,7 +144,42 @@\n return 0\n \n \n-def _run_single_hook(runner, repository, hook_id, args, write):\n+def _get_skips(environ):\n+ skips = environ.get('SKIP', '')\n+ return set(skip.strip() for skip in skips.split(',') if skip.strip())\n+\n+\n+def _print_no_files_skipped(hook, write, args):\n+ no_files_msg = '(no files to check) '\n+ skipped_msg = 'Skipped'\n+ write(\n+ '{0}{1}{2}{3}\\n'.format(\n+ hook['name'],\n+ '.' * (\n+ COLS -\n+ len(hook['name']) -\n+ len(no_files_msg) -\n+ len(skipped_msg) -\n+ 6\n+ ),\n+ no_files_msg,\n+ color.format_color(skipped_msg, color.TURQUOISE, args.color),\n+ )\n+ )\n+\n+\n+def _print_user_skipped(hook, write, args):\n+ skipped_msg = 'Skipped'\n+ write(\n+ '{0}{1}{2}\\n'.format(\n+ hook['name'],\n+ '.' * (COLS - len(hook['name']) - len(skipped_msg) - 6),\n+ color.format_color(skipped_msg, color.YELLOW, args.color),\n+ ),\n+ )\n+\n+\n+def _run_single_hook(runner, repository, hook_id, args, write, skips=set()):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n@@ -155,23 +190,11 @@\n hook = repository.hooks[hook_id]\n \n filenames = get_filenames(hook['files'], hook['exclude'])\n- if not filenames:\n- no_files_msg = '(no files to check) '\n- skipped_msg = 'Skipped'\n- write(\n- '{0}{1}{2}{3}\\n'.format(\n- hook['name'],\n- '.' * (\n- COLS -\n- len(hook['name']) -\n- len(no_files_msg) -\n- len(skipped_msg) -\n- 6\n- ),\n- no_files_msg,\n- color.format_color(skipped_msg, color.TURQUOISE, args.color),\n- )\n- )\n+ if hook_id in skips:\n+ _print_user_skipped(hook, write, args)\n+ return 0\n+ elif not filenames:\n+ _print_no_files_skipped(hook, write, args)\n return 0\n \n # Print the hook and the dots first in case the hook takes hella long to\n@@ -211,18 +234,23 @@\n return retcode\n \n \n-def _run_hooks(runner, args, write):\n+def _run_hooks(runner, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n \n+ skips = _get_skips(environ)\n+\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n- retval |= _run_single_hook(runner, repo, hook_id, args, write=write)\n+ retval |= _run_single_hook(\n+ runner, repo, hook_id, args, write, skips=skips,\n+ )\n \n return retval\n \n \n-def _run_hook(runner, hook_id, args, write):\n+def _run_hook(runner, args, write):\n+ hook_id = args.hook\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n@@ -236,7 +264,7 @@\n return bool(stdout.strip())\n \n \n-def run(runner, args, write=sys.stdout.write):\n+def run(runner, args, write=sys.stdout.write, environ=os.environ):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n@@ -253,6 +281,6 @@\n \n with ctx:\n if args.hook:\n- return _run_hook(runner, args.hook, args, write=write)\n+ return _run_hook(runner, args, write=write)\n else:\n- return _run_hooks(runner, args, write=write)\n+ return _run_hooks(runner, args, write=write, environ=environ)\n", "issue": "Add way to temporarily/permanently disable hooks.\n[overcommit](https://github.com/causes/overcommit) uses environment variables to do temporary skipping...\n\nFor instance:\n\n`SKIP=foo git commit` will skip the `foo` hook\n\nWhereas I've used a more-permanent switching with `git config hooks.foo false` in the past.\n\nConsidering both approaches, I think 
overcommit does this quite elegantly while focusing on only _temporarily_ disabling hooks.\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom asottile.ordereddict import OrderedDict\nfrom asottile.yaml import ordered_dump\nfrom asottile.yaml import ordered_load\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import color\nfrom pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.jsonschema_extensions import remove_defaults\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.repository import Repository\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\nCOLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])\n\nPASS_FAIL_LENGTH = 6\n\n\ndef install(runner):\n \"\"\"Install the pre-commit hooks.\"\"\"\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:\n pre_commit_file_obj.write(open(pre_commit_file).read())\n\n original_mode = os.stat(runner.pre_commit_path).st_mode\n os.chmod(\n runner.pre_commit_path,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n print('pre-commit installed at {0}'.format(runner.pre_commit_path))\n\n return 0\n\n\ndef uninstall(runner):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n if os.path.exists(runner.pre_commit_path):\n os.remove(runner.pre_commit_path)\n print('pre-commit uninstalled')\n return 0\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _update_repository(repo_config):\n \"\"\"Updates a repository to the tip of `master`. 
If the repository cannot\n be updated because a hook that is configured does not exist in `master`,\n this raises a RepositoryCannotBeUpdatedError\n\n Args:\n repo_config - A config for a repository\n \"\"\"\n repo = Repository(repo_config)\n\n with repo.in_checkout():\n local['git']['fetch']()\n head_sha = local['git']['rev-parse', 'origin/master']().strip()\n\n # Don't bother trying to update if our sha is the same\n if head_sha == repo_config['sha']:\n return repo_config\n\n # Construct a new config with the head sha\n new_config = OrderedDict(repo_config)\n new_config['sha'] = head_sha\n new_repo = Repository(new_config)\n\n # See if any of our hooks were deleted with the new commits\n hooks = set(repo.hooks.keys())\n hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{0}'.format(', '.join(sorted(hooks_missing)))\n )\n\n return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]\n\n\ndef autoupdate(runner):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n retv = 0\n output_configs = []\n changed = False\n\n input_configs = load_config(\n runner.config_file_path,\n load_strategy=ordered_load,\n )\n\n for repo_config in input_configs:\n print('Updating {0}...'.format(repo_config['repo']), end='')\n try:\n new_repo_config = _update_repository(repo_config)\n except RepositoryCannotBeUpdatedError as error:\n print(error.args[0])\n output_configs.append(repo_config)\n retv = 1\n continue\n\n if new_repo_config['sha'] != repo_config['sha']:\n changed = True\n print(\n 'updating {0} -> {1}.'.format(\n repo_config['sha'], new_repo_config['sha'],\n )\n )\n output_configs.append(new_repo_config)\n else:\n print('already up to date.')\n output_configs.append(repo_config)\n\n if changed:\n with open(runner.config_file_path, 'w') as config_file:\n config_file.write(\n ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)\n )\n\n return retv\n\n\ndef clean(runner):\n if os.path.exists(runner.hooks_workspace_path):\n shutil.rmtree(runner.hooks_workspace_path)\n print('Cleaned {0}.'.format(runner.hooks_workspace_path))\n return 0\n\n\ndef _run_single_hook(runner, repository, hook_id, args, write):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n get_filenames = git.get_conflicted_files_matching\n else:\n get_filenames = git.get_staged_files_matching\n\n hook = repository.hooks[hook_id]\n\n filenames = get_filenames(hook['files'], hook['exclude'])\n if not filenames:\n no_files_msg = '(no files to check) '\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}{3}\\n'.format(\n hook['name'],\n '.' * (\n COLS -\n len(hook['name']) -\n len(no_files_msg) -\n len(skipped_msg) -\n 6\n ),\n no_files_msg,\n color.format_color(skipped_msg, color.TURQUOISE, args.color),\n )\n )\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(\n '{0}{1}'.format(\n hook['name'],\n '.' 
* (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),\n ),\n )\n sys.stdout.flush()\n\n retcode, stdout, stderr = repository.run_hook(\n runner.cmd_runner,\n hook_id,\n filenames,\n )\n\n if retcode != repository.hooks[hook_id]['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(runner, args, write):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n retval |= _run_single_hook(runner, repo, hook_id, args, write=write)\n\n return retval\n\n\ndef _run_hook(runner, hook_id, args, write):\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n else:\n write('No hook with id `{0}`\\n'.format(hook_id))\n return 1\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n if args.hook:\n return _run_hook(runner, args.hook, args, write=write)\n else:\n return _run_hooks(runner, args, write=write)\n", "path": "pre_commit/commands.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom asottile.ordereddict import OrderedDict\nfrom asottile.yaml import ordered_dump\nfrom asottile.yaml import ordered_load\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import color\nfrom pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.jsonschema_extensions import remove_defaults\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.repository import Repository\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\nCOLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])\n\nPASS_FAIL_LENGTH = 6\n\n\ndef install(runner):\n \"\"\"Install the pre-commit hooks.\"\"\"\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:\n pre_commit_file_obj.write(open(pre_commit_file).read())\n\n original_mode = os.stat(runner.pre_commit_path).st_mode\n os.chmod(\n runner.pre_commit_path,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n print('pre-commit installed at {0}'.format(runner.pre_commit_path))\n\n return 0\n\n\ndef uninstall(runner):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n if 
os.path.exists(runner.pre_commit_path):\n os.remove(runner.pre_commit_path)\n print('pre-commit uninstalled')\n return 0\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _update_repository(repo_config):\n \"\"\"Updates a repository to the tip of `master`. If the repository cannot\n be updated because a hook that is configured does not exist in `master`,\n this raises a RepositoryCannotBeUpdatedError\n\n Args:\n repo_config - A config for a repository\n \"\"\"\n repo = Repository(repo_config)\n\n with repo.in_checkout():\n local['git']['fetch']()\n head_sha = local['git']['rev-parse', 'origin/master']().strip()\n\n # Don't bother trying to update if our sha is the same\n if head_sha == repo_config['sha']:\n return repo_config\n\n # Construct a new config with the head sha\n new_config = OrderedDict(repo_config)\n new_config['sha'] = head_sha\n new_repo = Repository(new_config)\n\n # See if any of our hooks were deleted with the new commits\n hooks = set(repo.hooks.keys())\n hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{0}'.format(', '.join(sorted(hooks_missing)))\n )\n\n return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]\n\n\ndef autoupdate(runner):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n retv = 0\n output_configs = []\n changed = False\n\n input_configs = load_config(\n runner.config_file_path,\n load_strategy=ordered_load,\n )\n\n for repo_config in input_configs:\n print('Updating {0}...'.format(repo_config['repo']), end='')\n try:\n new_repo_config = _update_repository(repo_config)\n except RepositoryCannotBeUpdatedError as error:\n print(error.args[0])\n output_configs.append(repo_config)\n retv = 1\n continue\n\n if new_repo_config['sha'] != repo_config['sha']:\n changed = True\n print(\n 'updating {0} -> {1}.'.format(\n repo_config['sha'], new_repo_config['sha'],\n )\n )\n output_configs.append(new_repo_config)\n else:\n print('already up to date.')\n output_configs.append(repo_config)\n\n if changed:\n with open(runner.config_file_path, 'w') as config_file:\n config_file.write(\n ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)\n )\n\n return retv\n\n\ndef clean(runner):\n if os.path.exists(runner.hooks_workspace_path):\n shutil.rmtree(runner.hooks_workspace_path)\n print('Cleaned {0}.'.format(runner.hooks_workspace_path))\n return 0\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _print_no_files_skipped(hook, write, args):\n no_files_msg = '(no files to check) '\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}{3}\\n'.format(\n hook['name'],\n '.' * (\n COLS -\n len(hook['name']) -\n len(no_files_msg) -\n len(skipped_msg) -\n 6\n ),\n no_files_msg,\n color.format_color(skipped_msg, color.TURQUOISE, args.color),\n )\n )\n\n\ndef _print_user_skipped(hook, write, args):\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}\\n'.format(\n hook['name'],\n '.' 
* (COLS - len(hook['name']) - len(skipped_msg) - 6),\n color.format_color(skipped_msg, color.YELLOW, args.color),\n ),\n )\n\n\ndef _run_single_hook(runner, repository, hook_id, args, write, skips=set()):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n get_filenames = git.get_conflicted_files_matching\n else:\n get_filenames = git.get_staged_files_matching\n\n hook = repository.hooks[hook_id]\n\n filenames = get_filenames(hook['files'], hook['exclude'])\n if hook_id in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(\n '{0}{1}'.format(\n hook['name'],\n '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),\n ),\n )\n sys.stdout.flush()\n\n retcode, stdout, stderr = repository.run_hook(\n runner.cmd_runner,\n hook_id,\n filenames,\n )\n\n if retcode != repository.hooks[hook_id]['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(runner, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n\n skips = _get_skips(environ)\n\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n retval |= _run_single_hook(\n runner, repo, hook_id, args, write, skips=skips,\n )\n\n return retval\n\n\ndef _run_hook(runner, args, write):\n hook_id = args.hook\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n else:\n write('No hook with id `{0}`\\n'.format(hook_id))\n return 1\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef run(runner, args, write=sys.stdout.write, environ=os.environ):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n if args.hook:\n return _run_hook(runner, args, write=write)\n else:\n return _run_hooks(runner, args, write=write, environ=environ)\n", "path": "pre_commit/commands.py"}]}
| 2,808 | 1,016 |
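The golden diff above threads an `environ` mapping through `run()` so that the comma-separated `SKIP` variable (e.g. `SKIP=foo git commit`, as the issue describes) is parsed by the new `_get_skips` helper, and user-skipped hooks are reported as `Skipped` in yellow instead of being run, while the existing "(no files to check)" skip keeps its turquoise label. A minimal sketch of the helper's behaviour, assuming the patched `pre_commit.commands` module is importable:

```python
# Sketch only: exercises the _get_skips helper introduced by the patch above.
# The plain dict stands in for os.environ, which is exactly why run() gained
# an `environ` parameter in the diff.
from pre_commit.commands import _get_skips

assert _get_skips({'SKIP': 'flake8, trailing-whitespace'}) == {'flake8', 'trailing-whitespace'}
assert _get_skips({'SKIP': ''}) == set()
assert _get_skips({}) == set()
```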
gh_patches_debug_29553
|
rasdani/github-patches
|
git_diff
|
borgbackup__borg-2980
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
netbsd: no readline
Python build missing readline, likely:
```
def import_paperkey(self, args):
# imported here because it has global side effects
> import readline
E ImportError: No module named 'readline'
.tox/py34/lib/python3.4/site-packages/borg/crypto/keymanager.py:146: ImportError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/borg/crypto/keymanager.py`
Content:
```
1 import binascii
2 import pkgutil
3 import textwrap
4 from binascii import unhexlify, a2b_base64, b2a_base64
5 from hashlib import sha256
6
7 from ..helpers import Manifest, NoManifestError, Error, yes, bin_to_hex, dash_open
8 from ..repository import Repository
9
10 from .key import KeyfileKey, KeyfileNotFoundError, KeyBlobStorage, identify_key
11
12
13 class UnencryptedRepo(Error):
14 """Keymanagement not available for unencrypted repositories."""
15
16
17 class UnknownKeyType(Error):
18 """Keytype {0} is unknown."""
19
20
21 class RepoIdMismatch(Error):
22 """This key backup seems to be for a different backup repository, aborting."""
23
24
25 class NotABorgKeyFile(Error):
26 """This file is not a borg key backup, aborting."""
27
28
29 def sha256_truncated(data, num):
30 h = sha256()
31 h.update(data)
32 return h.hexdigest()[:num]
33
34
35 class KeyManager:
36 def __init__(self, repository):
37 self.repository = repository
38 self.keyblob = None
39 self.keyblob_storage = None
40
41 try:
42 manifest_data = self.repository.get(Manifest.MANIFEST_ID)
43 except Repository.ObjectNotFound:
44 raise NoManifestError
45
46 key = identify_key(manifest_data)
47 self.keyblob_storage = key.STORAGE
48 if self.keyblob_storage == KeyBlobStorage.NO_STORAGE:
49 raise UnencryptedRepo()
50
51 def load_keyblob(self):
52 if self.keyblob_storage == KeyBlobStorage.KEYFILE:
53 k = KeyfileKey(self.repository)
54 target = k.find_key()
55 with open(target, 'r') as fd:
56 self.keyblob = ''.join(fd.readlines()[1:])
57
58 elif self.keyblob_storage == KeyBlobStorage.REPO:
59 self.keyblob = self.repository.load_key().decode()
60
61 def store_keyblob(self, args):
62 if self.keyblob_storage == KeyBlobStorage.KEYFILE:
63 k = KeyfileKey(self.repository)
64 try:
65 target = k.find_key()
66 except KeyfileNotFoundError:
67 target = k.get_new_target(args)
68
69 self.store_keyfile(target)
70 elif self.keyblob_storage == KeyBlobStorage.REPO:
71 self.repository.save_key(self.keyblob.encode('utf-8'))
72
73 def get_keyfile_data(self):
74 data = '%s %s\n' % (KeyfileKey.FILE_ID, bin_to_hex(self.repository.id))
75 data += self.keyblob
76 if not self.keyblob.endswith('\n'):
77 data += '\n'
78 return data
79
80 def store_keyfile(self, target):
81 with open(target, 'w') as fd:
82 fd.write(self.get_keyfile_data())
83
84 def export(self, path):
85 self.store_keyfile(path)
86
87 def export_qr(self, path):
88 with open(path, 'wb') as fd:
89 key_data = self.get_keyfile_data()
90 html = pkgutil.get_data('borg', 'paperkey.html')
91 html = html.replace(b'</textarea>', key_data.encode() + b'</textarea>')
92 fd.write(html)
93
94 def export_paperkey(self, path):
95 def grouped(s):
96 ret = ''
97 i = 0
98 for ch in s:
99 if i and i % 6 == 0:
100 ret += ' '
101 ret += ch
102 i += 1
103 return ret
104
105 export = 'To restore key use borg key import --paper /path/to/repo\n\n'
106
107 binary = a2b_base64(self.keyblob)
108 export += 'BORG PAPER KEY v1\n'
109 lines = (len(binary) + 17) // 18
110 repoid = bin_to_hex(self.repository.id)[:18]
111 complete_checksum = sha256_truncated(binary, 12)
112 export += 'id: {0:d} / {1} / {2} - {3}\n'.format(lines,
113 grouped(repoid),
114 grouped(complete_checksum),
115 sha256_truncated((str(lines) + '/' + repoid + '/' + complete_checksum).encode('ascii'), 2))
116 idx = 0
117 while len(binary):
118 idx += 1
119 binline = binary[:18]
120 checksum = sha256_truncated(idx.to_bytes(2, byteorder='big') + binline, 2)
121 export += '{0:2d}: {1} - {2}\n'.format(idx, grouped(bin_to_hex(binline)), checksum)
122 binary = binary[18:]
123
124 if path:
125 with open(path, 'w') as fd:
126 fd.write(export)
127 else:
128 print(export)
129
130 def import_keyfile(self, args):
131 file_id = KeyfileKey.FILE_ID
132 first_line = file_id + ' ' + bin_to_hex(self.repository.id) + '\n'
133 with dash_open(args.path, 'r') as fd:
134 file_first_line = fd.read(len(first_line))
135 if file_first_line != first_line:
136 if not file_first_line.startswith(file_id):
137 raise NotABorgKeyFile()
138 else:
139 raise RepoIdMismatch()
140 self.keyblob = fd.read()
141
142 self.store_keyblob(args)
143
144 def import_paperkey(self, args):
145 # imported here because it has global side effects
146 import readline
147
148 repoid = bin_to_hex(self.repository.id)[:18]
149 try:
150 while True: # used for repeating on overall checksum mismatch
151 # id line input
152 while True:
153 idline = input('id: ').replace(' ', '')
154 if idline == "":
155 if yes("Abort import? [yN]:"):
156 raise EOFError()
157
158 try:
159 (data, checksum) = idline.split('-')
160 except ValueError:
161 print("each line must contain exactly one '-', try again")
162 continue
163 try:
164 (id_lines, id_repoid, id_complete_checksum) = data.split('/')
165 except ValueError:
166 print("the id line must contain exactly three '/', try again")
167 continue
168 if sha256_truncated(data.lower().encode('ascii'), 2) != checksum:
169 print('line checksum did not match, try same line again')
170 continue
171 try:
172 lines = int(id_lines)
173 except ValueError:
174 print('internal error while parsing length')
175
176 break
177
178 if repoid != id_repoid:
179 raise RepoIdMismatch()
180
181 result = b''
182 idx = 1
183 # body line input
184 while True:
185 inline = input('{0:2d}: '.format(idx))
186 inline = inline.replace(' ', '')
187 if inline == "":
188 if yes("Abort import? [yN]:"):
189 raise EOFError()
190 try:
191 (data, checksum) = inline.split('-')
192 except ValueError:
193 print("each line must contain exactly one '-', try again")
194 continue
195 try:
196 part = unhexlify(data)
197 except binascii.Error:
198 print("only characters 0-9 and a-f and '-' are valid, try again")
199 continue
200 if sha256_truncated(idx.to_bytes(2, byteorder='big') + part, 2) != checksum:
201 print('line checksum did not match, try line {0} again'.format(idx))
202 continue
203 result += part
204 if idx == lines:
205 break
206 idx += 1
207
208 if sha256_truncated(result, 12) != id_complete_checksum:
209 print('The overall checksum did not match, retry or enter a blank line to abort.')
210 continue
211
212 self.keyblob = '\n'.join(textwrap.wrap(b2a_base64(result).decode('ascii'))) + '\n'
213 self.store_keyblob(args)
214 break
215
216 except EOFError:
217 print('\n - aborted')
218 return
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/borg/crypto/keymanager.py b/src/borg/crypto/keymanager.py
--- a/src/borg/crypto/keymanager.py
+++ b/src/borg/crypto/keymanager.py
@@ -142,8 +142,11 @@
self.store_keyblob(args)
def import_paperkey(self, args):
- # imported here because it has global side effects
- import readline
+ try:
+ # imported here because it has global side effects
+ import readline
+ except ImportError:
+ print('Note: No line editing available due to missing readline support')
repoid = bin_to_hex(self.repository.id)[:18]
try:
@@ -151,8 +154,8 @@
# id line input
while True:
idline = input('id: ').replace(' ', '')
- if idline == "":
- if yes("Abort import? [yN]:"):
+ if idline == '':
+ if yes('Abort import? [yN]:'):
raise EOFError()
try:
@@ -184,8 +187,8 @@
while True:
inline = input('{0:2d}: '.format(idx))
inline = inline.replace(' ', '')
- if inline == "":
- if yes("Abort import? [yN]:"):
+ if inline == '':
+ if yes('Abort import? [yN]:'):
raise EOFError()
try:
(data, checksum) = inline.split('-')
|
{"golden_diff": "diff --git a/src/borg/crypto/keymanager.py b/src/borg/crypto/keymanager.py\n--- a/src/borg/crypto/keymanager.py\n+++ b/src/borg/crypto/keymanager.py\n@@ -142,8 +142,11 @@\n self.store_keyblob(args)\n \n def import_paperkey(self, args):\n- # imported here because it has global side effects\n- import readline\n+ try:\n+ # imported here because it has global side effects\n+ import readline\n+ except ImportError:\n+ print('Note: No line editing available due to missing readline support')\n \n repoid = bin_to_hex(self.repository.id)[:18]\n try:\n@@ -151,8 +154,8 @@\n # id line input\n while True:\n idline = input('id: ').replace(' ', '')\n- if idline == \"\":\n- if yes(\"Abort import? [yN]:\"):\n+ if idline == '':\n+ if yes('Abort import? [yN]:'):\n raise EOFError()\n \n try:\n@@ -184,8 +187,8 @@\n while True:\n inline = input('{0:2d}: '.format(idx))\n inline = inline.replace(' ', '')\n- if inline == \"\":\n- if yes(\"Abort import? [yN]:\"):\n+ if inline == '':\n+ if yes('Abort import? [yN]:'):\n raise EOFError()\n try:\n (data, checksum) = inline.split('-')\n", "issue": "netbsd: no readline\nPython build missing readline, likely:\r\n```\r\n def import_paperkey(self, args):\r\n # imported here because it has global side effects\r\n> import readline\r\nE ImportError: No module named 'readline'\r\n\r\n.tox/py34/lib/python3.4/site-packages/borg/crypto/keymanager.py:146: ImportError\r\n```\n", "before_files": [{"content": "import binascii\nimport pkgutil\nimport textwrap\nfrom binascii import unhexlify, a2b_base64, b2a_base64\nfrom hashlib import sha256\n\nfrom ..helpers import Manifest, NoManifestError, Error, yes, bin_to_hex, dash_open\nfrom ..repository import Repository\n\nfrom .key import KeyfileKey, KeyfileNotFoundError, KeyBlobStorage, identify_key\n\n\nclass UnencryptedRepo(Error):\n \"\"\"Keymanagement not available for unencrypted repositories.\"\"\"\n\n\nclass UnknownKeyType(Error):\n \"\"\"Keytype {0} is unknown.\"\"\"\n\n\nclass RepoIdMismatch(Error):\n \"\"\"This key backup seems to be for a different backup repository, aborting.\"\"\"\n\n\nclass NotABorgKeyFile(Error):\n \"\"\"This file is not a borg key backup, aborting.\"\"\"\n\n\ndef sha256_truncated(data, num):\n h = sha256()\n h.update(data)\n return h.hexdigest()[:num]\n\n\nclass KeyManager:\n def __init__(self, repository):\n self.repository = repository\n self.keyblob = None\n self.keyblob_storage = None\n\n try:\n manifest_data = self.repository.get(Manifest.MANIFEST_ID)\n except Repository.ObjectNotFound:\n raise NoManifestError\n\n key = identify_key(manifest_data)\n self.keyblob_storage = key.STORAGE\n if self.keyblob_storage == KeyBlobStorage.NO_STORAGE:\n raise UnencryptedRepo()\n\n def load_keyblob(self):\n if self.keyblob_storage == KeyBlobStorage.KEYFILE:\n k = KeyfileKey(self.repository)\n target = k.find_key()\n with open(target, 'r') as fd:\n self.keyblob = ''.join(fd.readlines()[1:])\n\n elif self.keyblob_storage == KeyBlobStorage.REPO:\n self.keyblob = self.repository.load_key().decode()\n\n def store_keyblob(self, args):\n if self.keyblob_storage == KeyBlobStorage.KEYFILE:\n k = KeyfileKey(self.repository)\n try:\n target = k.find_key()\n except KeyfileNotFoundError:\n target = k.get_new_target(args)\n\n self.store_keyfile(target)\n elif self.keyblob_storage == KeyBlobStorage.REPO:\n self.repository.save_key(self.keyblob.encode('utf-8'))\n\n def get_keyfile_data(self):\n data = '%s %s\\n' % (KeyfileKey.FILE_ID, bin_to_hex(self.repository.id))\n data += self.keyblob\n if not 
self.keyblob.endswith('\\n'):\n data += '\\n'\n return data\n\n def store_keyfile(self, target):\n with open(target, 'w') as fd:\n fd.write(self.get_keyfile_data())\n\n def export(self, path):\n self.store_keyfile(path)\n\n def export_qr(self, path):\n with open(path, 'wb') as fd:\n key_data = self.get_keyfile_data()\n html = pkgutil.get_data('borg', 'paperkey.html')\n html = html.replace(b'</textarea>', key_data.encode() + b'</textarea>')\n fd.write(html)\n\n def export_paperkey(self, path):\n def grouped(s):\n ret = ''\n i = 0\n for ch in s:\n if i and i % 6 == 0:\n ret += ' '\n ret += ch\n i += 1\n return ret\n\n export = 'To restore key use borg key import --paper /path/to/repo\\n\\n'\n\n binary = a2b_base64(self.keyblob)\n export += 'BORG PAPER KEY v1\\n'\n lines = (len(binary) + 17) // 18\n repoid = bin_to_hex(self.repository.id)[:18]\n complete_checksum = sha256_truncated(binary, 12)\n export += 'id: {0:d} / {1} / {2} - {3}\\n'.format(lines,\n grouped(repoid),\n grouped(complete_checksum),\n sha256_truncated((str(lines) + '/' + repoid + '/' + complete_checksum).encode('ascii'), 2))\n idx = 0\n while len(binary):\n idx += 1\n binline = binary[:18]\n checksum = sha256_truncated(idx.to_bytes(2, byteorder='big') + binline, 2)\n export += '{0:2d}: {1} - {2}\\n'.format(idx, grouped(bin_to_hex(binline)), checksum)\n binary = binary[18:]\n\n if path:\n with open(path, 'w') as fd:\n fd.write(export)\n else:\n print(export)\n\n def import_keyfile(self, args):\n file_id = KeyfileKey.FILE_ID\n first_line = file_id + ' ' + bin_to_hex(self.repository.id) + '\\n'\n with dash_open(args.path, 'r') as fd:\n file_first_line = fd.read(len(first_line))\n if file_first_line != first_line:\n if not file_first_line.startswith(file_id):\n raise NotABorgKeyFile()\n else:\n raise RepoIdMismatch()\n self.keyblob = fd.read()\n\n self.store_keyblob(args)\n\n def import_paperkey(self, args):\n # imported here because it has global side effects\n import readline\n\n repoid = bin_to_hex(self.repository.id)[:18]\n try:\n while True: # used for repeating on overall checksum mismatch\n # id line input\n while True:\n idline = input('id: ').replace(' ', '')\n if idline == \"\":\n if yes(\"Abort import? [yN]:\"):\n raise EOFError()\n\n try:\n (data, checksum) = idline.split('-')\n except ValueError:\n print(\"each line must contain exactly one '-', try again\")\n continue\n try:\n (id_lines, id_repoid, id_complete_checksum) = data.split('/')\n except ValueError:\n print(\"the id line must contain exactly three '/', try again\")\n continue\n if sha256_truncated(data.lower().encode('ascii'), 2) != checksum:\n print('line checksum did not match, try same line again')\n continue\n try:\n lines = int(id_lines)\n except ValueError:\n print('internal error while parsing length')\n\n break\n\n if repoid != id_repoid:\n raise RepoIdMismatch()\n\n result = b''\n idx = 1\n # body line input\n while True:\n inline = input('{0:2d}: '.format(idx))\n inline = inline.replace(' ', '')\n if inline == \"\":\n if yes(\"Abort import? 
[yN]:\"):\n raise EOFError()\n try:\n (data, checksum) = inline.split('-')\n except ValueError:\n print(\"each line must contain exactly one '-', try again\")\n continue\n try:\n part = unhexlify(data)\n except binascii.Error:\n print(\"only characters 0-9 and a-f and '-' are valid, try again\")\n continue\n if sha256_truncated(idx.to_bytes(2, byteorder='big') + part, 2) != checksum:\n print('line checksum did not match, try line {0} again'.format(idx))\n continue\n result += part\n if idx == lines:\n break\n idx += 1\n\n if sha256_truncated(result, 12) != id_complete_checksum:\n print('The overall checksum did not match, retry or enter a blank line to abort.')\n continue\n\n self.keyblob = '\\n'.join(textwrap.wrap(b2a_base64(result).decode('ascii'))) + '\\n'\n self.store_keyblob(args)\n break\n\n except EOFError:\n print('\\n - aborted')\n return\n", "path": "src/borg/crypto/keymanager.py"}], "after_files": [{"content": "import binascii\nimport pkgutil\nimport textwrap\nfrom binascii import unhexlify, a2b_base64, b2a_base64\nfrom hashlib import sha256\n\nfrom ..helpers import Manifest, NoManifestError, Error, yes, bin_to_hex, dash_open\nfrom ..repository import Repository\n\nfrom .key import KeyfileKey, KeyfileNotFoundError, KeyBlobStorage, identify_key\n\n\nclass UnencryptedRepo(Error):\n \"\"\"Keymanagement not available for unencrypted repositories.\"\"\"\n\n\nclass UnknownKeyType(Error):\n \"\"\"Keytype {0} is unknown.\"\"\"\n\n\nclass RepoIdMismatch(Error):\n \"\"\"This key backup seems to be for a different backup repository, aborting.\"\"\"\n\n\nclass NotABorgKeyFile(Error):\n \"\"\"This file is not a borg key backup, aborting.\"\"\"\n\n\ndef sha256_truncated(data, num):\n h = sha256()\n h.update(data)\n return h.hexdigest()[:num]\n\n\nclass KeyManager:\n def __init__(self, repository):\n self.repository = repository\n self.keyblob = None\n self.keyblob_storage = None\n\n try:\n manifest_data = self.repository.get(Manifest.MANIFEST_ID)\n except Repository.ObjectNotFound:\n raise NoManifestError\n\n key = identify_key(manifest_data)\n self.keyblob_storage = key.STORAGE\n if self.keyblob_storage == KeyBlobStorage.NO_STORAGE:\n raise UnencryptedRepo()\n\n def load_keyblob(self):\n if self.keyblob_storage == KeyBlobStorage.KEYFILE:\n k = KeyfileKey(self.repository)\n target = k.find_key()\n with open(target, 'r') as fd:\n self.keyblob = ''.join(fd.readlines()[1:])\n\n elif self.keyblob_storage == KeyBlobStorage.REPO:\n self.keyblob = self.repository.load_key().decode()\n\n def store_keyblob(self, args):\n if self.keyblob_storage == KeyBlobStorage.KEYFILE:\n k = KeyfileKey(self.repository)\n try:\n target = k.find_key()\n except KeyfileNotFoundError:\n target = k.get_new_target(args)\n\n self.store_keyfile(target)\n elif self.keyblob_storage == KeyBlobStorage.REPO:\n self.repository.save_key(self.keyblob.encode('utf-8'))\n\n def get_keyfile_data(self):\n data = '%s %s\\n' % (KeyfileKey.FILE_ID, bin_to_hex(self.repository.id))\n data += self.keyblob\n if not self.keyblob.endswith('\\n'):\n data += '\\n'\n return data\n\n def store_keyfile(self, target):\n with open(target, 'w') as fd:\n fd.write(self.get_keyfile_data())\n\n def export(self, path):\n self.store_keyfile(path)\n\n def export_qr(self, path):\n with open(path, 'wb') as fd:\n key_data = self.get_keyfile_data()\n html = pkgutil.get_data('borg', 'paperkey.html')\n html = html.replace(b'</textarea>', key_data.encode() + b'</textarea>')\n fd.write(html)\n\n def export_paperkey(self, path):\n def grouped(s):\n ret = ''\n i = 0\n 
for ch in s:\n if i and i % 6 == 0:\n ret += ' '\n ret += ch\n i += 1\n return ret\n\n export = 'To restore key use borg key import --paper /path/to/repo\\n\\n'\n\n binary = a2b_base64(self.keyblob)\n export += 'BORG PAPER KEY v1\\n'\n lines = (len(binary) + 17) // 18\n repoid = bin_to_hex(self.repository.id)[:18]\n complete_checksum = sha256_truncated(binary, 12)\n export += 'id: {0:d} / {1} / {2} - {3}\\n'.format(lines,\n grouped(repoid),\n grouped(complete_checksum),\n sha256_truncated((str(lines) + '/' + repoid + '/' + complete_checksum).encode('ascii'), 2))\n idx = 0\n while len(binary):\n idx += 1\n binline = binary[:18]\n checksum = sha256_truncated(idx.to_bytes(2, byteorder='big') + binline, 2)\n export += '{0:2d}: {1} - {2}\\n'.format(idx, grouped(bin_to_hex(binline)), checksum)\n binary = binary[18:]\n\n if path:\n with open(path, 'w') as fd:\n fd.write(export)\n else:\n print(export)\n\n def import_keyfile(self, args):\n file_id = KeyfileKey.FILE_ID\n first_line = file_id + ' ' + bin_to_hex(self.repository.id) + '\\n'\n with dash_open(args.path, 'r') as fd:\n file_first_line = fd.read(len(first_line))\n if file_first_line != first_line:\n if not file_first_line.startswith(file_id):\n raise NotABorgKeyFile()\n else:\n raise RepoIdMismatch()\n self.keyblob = fd.read()\n\n self.store_keyblob(args)\n\n def import_paperkey(self, args):\n try:\n # imported here because it has global side effects\n import readline\n except ImportError:\n print('Note: No line editing available due to missing readline support')\n\n repoid = bin_to_hex(self.repository.id)[:18]\n try:\n while True: # used for repeating on overall checksum mismatch\n # id line input\n while True:\n idline = input('id: ').replace(' ', '')\n if idline == '':\n if yes('Abort import? [yN]:'):\n raise EOFError()\n\n try:\n (data, checksum) = idline.split('-')\n except ValueError:\n print(\"each line must contain exactly one '-', try again\")\n continue\n try:\n (id_lines, id_repoid, id_complete_checksum) = data.split('/')\n except ValueError:\n print(\"the id line must contain exactly three '/', try again\")\n continue\n if sha256_truncated(data.lower().encode('ascii'), 2) != checksum:\n print('line checksum did not match, try same line again')\n continue\n try:\n lines = int(id_lines)\n except ValueError:\n print('internal error while parsing length')\n\n break\n\n if repoid != id_repoid:\n raise RepoIdMismatch()\n\n result = b''\n idx = 1\n # body line input\n while True:\n inline = input('{0:2d}: '.format(idx))\n inline = inline.replace(' ', '')\n if inline == '':\n if yes('Abort import? [yN]:'):\n raise EOFError()\n try:\n (data, checksum) = inline.split('-')\n except ValueError:\n print(\"each line must contain exactly one '-', try again\")\n continue\n try:\n part = unhexlify(data)\n except binascii.Error:\n print(\"only characters 0-9 and a-f and '-' are valid, try again\")\n continue\n if sha256_truncated(idx.to_bytes(2, byteorder='big') + part, 2) != checksum:\n print('line checksum did not match, try line {0} again'.format(idx))\n continue\n result += part\n if idx == lines:\n break\n idx += 1\n\n if sha256_truncated(result, 12) != id_complete_checksum:\n print('The overall checksum did not match, retry or enter a blank line to abort.')\n continue\n\n self.keyblob = '\\n'.join(textwrap.wrap(b2a_base64(result).decode('ascii'))) + '\\n'\n self.store_keyblob(args)\n break\n\n except EOFError:\n print('\\n - aborted')\n return\n", "path": "src/borg/crypto/keymanager.py"}]}
| 2,589 | 332 |
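The fix above boils down to making the `readline` import optional: the module is imported only for its side effect of enabling line editing for the subsequent `input()` calls, so a missing module (as on the NetBSD build in the issue) degrades gracefully instead of raising. A minimal, standalone sketch of that pattern:

```python
# Sketch of the optional-import pattern used in the patch: readline is wanted
# purely for its side effect (line editing/history for input()), so its absence
# is reported once and input() keeps working without editing support.
try:
    import readline  # noqa: F401
except ImportError:
    print('Note: No line editing available due to missing readline support')

line = input('id: ')  # works with or without readline
```

The quote-style changes in the same hunks (`"Abort import? [yN]:"` to `'Abort import? [yN]:'`) are purely cosmetic.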
gh_patches_debug_23129
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-1567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove or block impersonate's "list" and "search" urls
Saleor uses the [django-impersonate](https://bitbucket.org/petersanchez/django-impersonate/overview) for client impersonation feature. While working on #1549 I've found out that in addition to two views that we are using (start and stop impersonating the user), the library brings additional two views that we don't really want to support:
https://demo.getsaleor.com/impersonate/list/
https://demo.getsaleor.com/impersonate/search/?q=admin (note: this one 500's on link)
Ideally, library would've provided us with a settings to disable those views, but this isn't the case.
So its worth asking ourselves what harm is there in keeping those views around, and if we really want to get rid of those two views, how would we go about it?
Looking at the [imersonate.urls](https://bitbucket.org/petersanchez/django-impersonate/src/f898c697b2bd9945187f8667d680e6d10d06dc33/impersonate/urls.py?at=default&fileviewer=file-view-default), it may be as simple as updating our `urls.py` to explictly define `impersonate-start` and `impersonate-stop`, or perhaps we should open the issue upstream and see what library's author thinks about it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/urls.py`
Content:
```
1 from django.conf import settings
2 from django.conf.urls import url, include
3 from django.conf.urls.static import static
4 from django.contrib.sitemaps.views import sitemap
5 from django.contrib.staticfiles.views import serve
6 from django.views.i18n import JavaScriptCatalog
7 from graphene_django.views import GraphQLView
8
9 from .cart.urls import urlpatterns as cart_urls
10 from .checkout.urls import urlpatterns as checkout_urls
11 from .core.sitemaps import sitemaps
12 from .core.urls import urlpatterns as core_urls
13 from .dashboard.urls import urlpatterns as dashboard_urls
14 from .data_feeds.urls import urlpatterns as feed_urls
15 from .order.urls import urlpatterns as order_urls
16 from .product.urls import urlpatterns as product_urls
17 from .registration.urls import urlpatterns as registration_urls
18 from .search.urls import urlpatterns as search_urls
19 from .userprofile.urls import urlpatterns as userprofile_urls
20
21 urlpatterns = [
22 url(r'^', include(core_urls)),
23 url(r'^account/', include(registration_urls)),
24 url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),
25 url(r'^checkout/',
26 include((checkout_urls, 'checkout'), namespace='checkout')),
27 url(r'^dashboard/',
28 include((dashboard_urls, 'dashboard'), namespace='dashboard')),
29 url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
30 url(r'^impersonate/', include('impersonate.urls')),
31 url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
32 url(r'^order/', include((order_urls, 'order'), namespace='order')),
33 url(r'^products/',
34 include((product_urls, 'product'), namespace='product')),
35 url(r'^profile/',
36 include((userprofile_urls, 'profile'), namespace='profile')),
37 url(r'^feeds/',
38 include((feed_urls, 'data_feeds'), namespace='data_feeds')),
39 url(r'^search/', include((search_urls, 'search'), namespace='search')),
40 url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
41 name='django.contrib.sitemaps.views.sitemap'),
42 url(r'', include('payments.urls')),
43 url('', include('social_django.urls', namespace='social')),
44 ]
45
46 if settings.DEBUG:
47 # static files (images, css, javascript, etc.)
48 urlpatterns += [
49 url(r'^static/(?P<path>.*)$', serve)
50 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/urls.py b/saleor/urls.py
--- a/saleor/urls.py
+++ b/saleor/urls.py
@@ -5,6 +5,7 @@
from django.contrib.staticfiles.views import serve
from django.views.i18n import JavaScriptCatalog
from graphene_django.views import GraphQLView
+from impersonate.views import impersonate, stop_impersonate
from .cart.urls import urlpatterns as cart_urls
from .checkout.urls import urlpatterns as checkout_urls
@@ -27,7 +28,8 @@
url(r'^dashboard/',
include((dashboard_urls, 'dashboard'), namespace='dashboard')),
url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
- url(r'^impersonate/', include('impersonate.urls')),
+ url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),
+ url(r'^impersonate/(?P<uid>\d+)/$', impersonate, name='impersonate-start'),
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
url(r'^order/', include((order_urls, 'order'), namespace='order')),
url(r'^products/',
|
{"golden_diff": "diff --git a/saleor/urls.py b/saleor/urls.py\n--- a/saleor/urls.py\n+++ b/saleor/urls.py\n@@ -5,6 +5,7 @@\n from django.contrib.staticfiles.views import serve\n from django.views.i18n import JavaScriptCatalog\n from graphene_django.views import GraphQLView\n+from impersonate.views import impersonate, stop_impersonate\n \n from .cart.urls import urlpatterns as cart_urls\n from .checkout.urls import urlpatterns as checkout_urls\n@@ -27,7 +28,8 @@\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),\n- url(r'^impersonate/', include('impersonate.urls')),\n+ url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),\n+ url(r'^impersonate/(?P<uid>\\d+)/$', impersonate, name='impersonate-start'),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n", "issue": "Remove or block impersonate's \"list\" and \"search\" urls\nSaleor uses the [django-impersonate](https://bitbucket.org/petersanchez/django-impersonate/overview) for client impersonation feature. While working on #1549 I've found out that in addition to two views that we are using (start and stop impersonating the user), the library brings additional two views that we don't really want to support:\r\n\r\nhttps://demo.getsaleor.com/impersonate/list/\r\nhttps://demo.getsaleor.com/impersonate/search/?q=admin (note: this one 500's on link)\r\n\r\nIdeally, library would've provided us with a settings to disable those views, but this isn't the case.\r\n\r\nSo its worth asking ourselves what harm is there in keeping those views around, and if we really want to get rid of those two views, how would we go about it?\r\n\r\nLooking at the [imersonate.urls](https://bitbucket.org/petersanchez/django-impersonate/src/f898c697b2bd9945187f8667d680e6d10d06dc33/impersonate/urls.py?at=default&fileviewer=file-view-default), it may be as simple as updating our `urls.py` to explictly define `impersonate-start` and `impersonate-stop`, or perhaps we should open the issue upstream and see what library's author thinks about it?\r\n \n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.views import serve\nfrom django.views.i18n import JavaScriptCatalog\nfrom graphene_django.views import GraphQLView\n\nfrom .cart.urls import urlpatterns as cart_urls\nfrom .checkout.urls import urlpatterns as checkout_urls\nfrom .core.sitemaps import sitemaps\nfrom .core.urls import urlpatterns as core_urls\nfrom .dashboard.urls import urlpatterns as dashboard_urls\nfrom .data_feeds.urls import urlpatterns as feed_urls\nfrom .order.urls import urlpatterns as order_urls\nfrom .product.urls import urlpatterns as product_urls\nfrom .registration.urls import urlpatterns as registration_urls\nfrom .search.urls import urlpatterns as search_urls\nfrom .userprofile.urls import urlpatterns as userprofile_urls\n\nurlpatterns = [\n url(r'^', include(core_urls)),\n url(r'^account/', include(registration_urls)),\n url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),\n url(r'^checkout/',\n include((checkout_urls, 'checkout'), namespace='checkout')),\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', 
GraphQLView.as_view(graphiql=settings.DEBUG)),\n url(r'^impersonate/', include('impersonate.urls')),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n include((product_urls, 'product'), namespace='product')),\n url(r'^profile/',\n include((userprofile_urls, 'profile'), namespace='profile')),\n url(r'^feeds/',\n include((feed_urls, 'data_feeds'), namespace='data_feeds')),\n url(r'^search/', include((search_urls, 'search'), namespace='search')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'', include('payments.urls')),\n url('', include('social_django.urls', namespace='social')),\n]\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += [\n url(r'^static/(?P<path>.*)$', serve)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "saleor/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.views import serve\nfrom django.views.i18n import JavaScriptCatalog\nfrom graphene_django.views import GraphQLView\nfrom impersonate.views import impersonate, stop_impersonate\n\nfrom .cart.urls import urlpatterns as cart_urls\nfrom .checkout.urls import urlpatterns as checkout_urls\nfrom .core.sitemaps import sitemaps\nfrom .core.urls import urlpatterns as core_urls\nfrom .dashboard.urls import urlpatterns as dashboard_urls\nfrom .data_feeds.urls import urlpatterns as feed_urls\nfrom .order.urls import urlpatterns as order_urls\nfrom .product.urls import urlpatterns as product_urls\nfrom .registration.urls import urlpatterns as registration_urls\nfrom .search.urls import urlpatterns as search_urls\nfrom .userprofile.urls import urlpatterns as userprofile_urls\n\nurlpatterns = [\n url(r'^', include(core_urls)),\n url(r'^account/', include(registration_urls)),\n url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),\n url(r'^checkout/',\n include((checkout_urls, 'checkout'), namespace='checkout')),\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),\n url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),\n url(r'^impersonate/(?P<uid>\\d+)/$', impersonate, name='impersonate-start'),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n include((product_urls, 'product'), namespace='product')),\n url(r'^profile/',\n include((userprofile_urls, 'profile'), namespace='profile')),\n url(r'^feeds/',\n include((feed_urls, 'data_feeds'), namespace='data_feeds')),\n url(r'^search/', include((search_urls, 'search'), namespace='search')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'', include('payments.urls')),\n url('', include('social_django.urls', namespace='social')),\n]\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += [\n url(r'^static/(?P<path>.*)$', serve)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "saleor/urls.py"}]}
| 1,191 | 266 |
gh_patches_debug_16174
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-6912
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Firestore] WriteBatch doesn't return the instance, so you cannot chain.
The WriteBatch write methods don't return the WriteBatch instance, so calls cannot be chained.
--- END ISSUE ---
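For illustration only, the chaining the issue asks for requires each write method to return the batch object itself. The snippet below is a minimal standalone sketch of that pattern using a toy class; it is not the actual google-cloud-firestore API, and the method names are kept only loosely similar.

```python
class ChainableBatch:
    """Toy write batch whose mutating methods return self so calls can be chained."""

    def __init__(self):
        self._ops = []

    def create(self, ref, data):
        self._ops.append(('create', ref, data))
        return self  # returning self is what makes chaining possible

    def delete(self, ref):
        self._ops.append(('delete', ref, None))
        return self

    def commit(self):
        # return the accumulated operations and reset, mimicking a batch commit
        ops, self._ops = self._ops, []
        return ops


# chained usage of the kind the issue describes:
results = ChainableBatch().create('users/ada', {'age': 36}).delete('users/bob').commit()
print(results)
```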
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `firestore/google/cloud/firestore_v1beta1/batch.py`
Content:
```
1 # Copyright 2017 Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helpers for batch requests to the Google Cloud Firestore API."""
16
17
18 from google.cloud.firestore_v1beta1 import _helpers
19
20
21 class WriteBatch(object):
22 """Accumulate write operations to be sent in a batch.
23
24 This has the same set of methods for write operations that
25 :class:`~.firestore_v1beta1.document.DocumentReference` does,
26 e.g. :meth:`~.firestore_v1beta1.document.DocumentReference.create`.
27
28 Args:
29 client (~.firestore_v1beta1.client.Client): The client that
30 created this batch.
31 """
32
33 def __init__(self, client):
34 self._client = client
35 self._write_pbs = []
36
37 def _add_write_pbs(self, write_pbs):
38 """Add `Write`` protobufs to this transaction.
39
40 This method intended to be over-ridden by subclasses.
41
42 Args:
43 write_pbs (List[google.cloud.proto.firestore.v1beta1.\
44 write_pb2.Write]): A list of write protobufs to be added.
45 """
46 self._write_pbs.extend(write_pbs)
47
48 def create(self, reference, document_data):
49 """Add a "change" to this batch to create a document.
50
51 If the document given by ``reference`` already exists, then this
52 batch will fail when :meth:`commit`-ed.
53
54 Args:
55 reference (~.firestore_v1beta1.document.DocumentReference): A
56 document reference to be created in this batch.
57 document_data (dict): Property names and values to use for
58 creating a document.
59 """
60 write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
61 self._add_write_pbs(write_pbs)
62
63 def set(self, reference, document_data, merge=False):
64 """Add a "change" to replace a document.
65
66 See
67 :meth:`~.firestore_v1beta1.document.DocumentReference.set` for
68 more information on how ``option`` determines how the change is
69 applied.
70
71 Args:
72 reference (~.firestore_v1beta1.document.DocumentReference):
73 A document reference that will have values set in this batch.
74 document_data (dict):
75 Property names and values to use for replacing a document.
76 merge (Optional[bool] or Optional[List<apispec>]):
77 If True, apply merging instead of overwriting the state
78 of the document.
79 """
80 if merge is not False:
81 write_pbs = _helpers.pbs_for_set_with_merge(
82 reference._document_path, document_data, merge
83 )
84 else:
85 write_pbs = _helpers.pbs_for_set_no_merge(
86 reference._document_path, document_data
87 )
88
89 self._add_write_pbs(write_pbs)
90
91 def update(self, reference, field_updates, option=None):
92 """Add a "change" to update a document.
93
94 See
95 :meth:`~.firestore_v1beta1.document.DocumentReference.update` for
96 more information on ``field_updates`` and ``option``.
97
98 Args:
99 reference (~.firestore_v1beta1.document.DocumentReference): A
100 document reference that will be deleted in this batch.
101 field_updates (dict): Field names or paths to update and values
102 to update with.
103 option (Optional[~.firestore_v1beta1.client.WriteOption]): A
104 write option to make assertions / preconditions on the server
105 state of the document before applying changes.
106 """
107 if option.__class__.__name__ == "ExistsOption":
108 raise ValueError("you must not pass an explicit write option to " "update.")
109 write_pbs = _helpers.pbs_for_update(
110 reference._document_path, field_updates, option
111 )
112 self._add_write_pbs(write_pbs)
113
114 def delete(self, reference, option=None):
115 """Add a "change" to delete a document.
116
117 See
118 :meth:`~.firestore_v1beta1.document.DocumentReference.delete` for
119 more information on how ``option`` determines how the change is
120 applied.
121
122 Args:
123 reference (~.firestore_v1beta1.document.DocumentReference): A
124 document reference that will be deleted in this batch.
125 option (Optional[~.firestore_v1beta1.client.WriteOption]): A
126 write option to make assertions / preconditions on the server
127 state of the document before applying changes.
128 """
129 write_pb = _helpers.pb_for_delete(reference._document_path, option)
130 self._add_write_pbs([write_pb])
131
132 def commit(self):
133 """Commit the changes accumulated in this batch.
134
135 Returns:
136 List[google.cloud.proto.firestore.v1beta1.\
137 write_pb2.WriteResult, ...]: The write results corresponding
138 to the changes committed, returned in the same order as the
139 changes were applied to this batch. A write result contains an
140 ``update_time`` field.
141 """
142 commit_response = self._client._firestore_api.commit(
143 self._client._database_string,
144 self._write_pbs,
145 transaction=None,
146 metadata=self._client._rpc_metadata,
147 )
148
149 self._write_pbs = []
150 return list(commit_response.write_results)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/firestore/google/cloud/firestore_v1beta1/batch.py b/firestore/google/cloud/firestore_v1beta1/batch.py
--- a/firestore/google/cloud/firestore_v1beta1/batch.py
+++ b/firestore/google/cloud/firestore_v1beta1/batch.py
@@ -33,6 +33,8 @@
def __init__(self, client):
self._client = client
self._write_pbs = []
+ self.write_results = None
+ self.commit_time = None
def _add_write_pbs(self, write_pbs):
"""Add `Write`` protobufs to this transaction.
@@ -147,4 +149,13 @@
)
self._write_pbs = []
- return list(commit_response.write_results)
+ self.write_results = results = list(commit_response.write_results)
+ self.commit_time = commit_response.commit_time
+ return results
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type is None:
+ self.commit()
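As a usage note on the design choice above: rather than returning the batch from every write call, the patch exposes the results on the object and adds context-manager hooks, so a batch commits itself on a clean exit. The following self-contained toy mirrors that behaviour; it is a sketch, not the real WriteBatch.

```python
class ToyBatch:
    """Standalone stand-in that mirrors the __enter__/__exit__ logic added above."""

    def __init__(self):
        self._ops = []
        self.write_results = None

    def set(self, ref, data):
        self._ops.append((ref, data))

    def commit(self):
        self.write_results = list(self._ops)
        self._ops = []
        return self.write_results

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # commit only when the block exits without an exception,
        # just as the patched WriteBatch does
        if exc_type is None:
            self.commit()


with ToyBatch() as batch:
    batch.set('users/ada', {'age': 36})
print(batch.write_results)  # [('users/ada', {'age': 36})]
```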
|
{"golden_diff": "diff --git a/firestore/google/cloud/firestore_v1beta1/batch.py b/firestore/google/cloud/firestore_v1beta1/batch.py\n--- a/firestore/google/cloud/firestore_v1beta1/batch.py\n+++ b/firestore/google/cloud/firestore_v1beta1/batch.py\n@@ -33,6 +33,8 @@\n def __init__(self, client):\n self._client = client\n self._write_pbs = []\n+ self.write_results = None\n+ self.commit_time = None\n \n def _add_write_pbs(self, write_pbs):\n \"\"\"Add `Write`` protobufs to this transaction.\n@@ -147,4 +149,13 @@\n )\n \n self._write_pbs = []\n- return list(commit_response.write_results)\n+ self.write_results = results = list(commit_response.write_results)\n+ self.commit_time = commit_response.commit_time\n+ return results\n+\n+ def __enter__(self):\n+ return self\n+\n+ def __exit__(self, exc_type, exc_value, traceback):\n+ if exc_type is None:\n+ self.commit()\n", "issue": "[Firestore] WriteBatch doesn't return instance so you cannot chain.\nThe WriteBatch methods don\u2019t return the WriteBatch instances for chaining.\r\n\n", "before_files": [{"content": "# Copyright 2017 Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for batch requests to the Google Cloud Firestore API.\"\"\"\n\n\nfrom google.cloud.firestore_v1beta1 import _helpers\n\n\nclass WriteBatch(object):\n \"\"\"Accumulate write operations to be sent in a batch.\n\n This has the same set of methods for write operations that\n :class:`~.firestore_v1beta1.document.DocumentReference` does,\n e.g. 
:meth:`~.firestore_v1beta1.document.DocumentReference.create`.\n\n Args:\n client (~.firestore_v1beta1.client.Client): The client that\n created this batch.\n \"\"\"\n\n def __init__(self, client):\n self._client = client\n self._write_pbs = []\n\n def _add_write_pbs(self, write_pbs):\n \"\"\"Add `Write`` protobufs to this transaction.\n\n This method intended to be over-ridden by subclasses.\n\n Args:\n write_pbs (List[google.cloud.proto.firestore.v1beta1.\\\n write_pb2.Write]): A list of write protobufs to be added.\n \"\"\"\n self._write_pbs.extend(write_pbs)\n\n def create(self, reference, document_data):\n \"\"\"Add a \"change\" to this batch to create a document.\n\n If the document given by ``reference`` already exists, then this\n batch will fail when :meth:`commit`-ed.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference to be created in this batch.\n document_data (dict): Property names and values to use for\n creating a document.\n \"\"\"\n write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)\n self._add_write_pbs(write_pbs)\n\n def set(self, reference, document_data, merge=False):\n \"\"\"Add a \"change\" to replace a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.set` for\n more information on how ``option`` determines how the change is\n applied.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference):\n A document reference that will have values set in this batch.\n document_data (dict):\n Property names and values to use for replacing a document.\n merge (Optional[bool] or Optional[List<apispec>]):\n If True, apply merging instead of overwriting the state\n of the document.\n \"\"\"\n if merge is not False:\n write_pbs = _helpers.pbs_for_set_with_merge(\n reference._document_path, document_data, merge\n )\n else:\n write_pbs = _helpers.pbs_for_set_no_merge(\n reference._document_path, document_data\n )\n\n self._add_write_pbs(write_pbs)\n\n def update(self, reference, field_updates, option=None):\n \"\"\"Add a \"change\" to update a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.update` for\n more information on ``field_updates`` and ``option``.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference that will be deleted in this batch.\n field_updates (dict): Field names or paths to update and values\n to update with.\n option (Optional[~.firestore_v1beta1.client.WriteOption]): A\n write option to make assertions / preconditions on the server\n state of the document before applying changes.\n \"\"\"\n if option.__class__.__name__ == \"ExistsOption\":\n raise ValueError(\"you must not pass an explicit write option to \" \"update.\")\n write_pbs = _helpers.pbs_for_update(\n reference._document_path, field_updates, option\n )\n self._add_write_pbs(write_pbs)\n\n def delete(self, reference, option=None):\n \"\"\"Add a \"change\" to delete a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.delete` for\n more information on how ``option`` determines how the change is\n applied.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference that will be deleted in this batch.\n option (Optional[~.firestore_v1beta1.client.WriteOption]): A\n write option to make assertions / preconditions on the server\n state of the document before applying changes.\n \"\"\"\n write_pb = _helpers.pb_for_delete(reference._document_path, option)\n 
self._add_write_pbs([write_pb])\n\n def commit(self):\n \"\"\"Commit the changes accumulated in this batch.\n\n Returns:\n List[google.cloud.proto.firestore.v1beta1.\\\n write_pb2.WriteResult, ...]: The write results corresponding\n to the changes committed, returned in the same order as the\n changes were applied to this batch. A write result contains an\n ``update_time`` field.\n \"\"\"\n commit_response = self._client._firestore_api.commit(\n self._client._database_string,\n self._write_pbs,\n transaction=None,\n metadata=self._client._rpc_metadata,\n )\n\n self._write_pbs = []\n return list(commit_response.write_results)\n", "path": "firestore/google/cloud/firestore_v1beta1/batch.py"}], "after_files": [{"content": "# Copyright 2017 Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for batch requests to the Google Cloud Firestore API.\"\"\"\n\n\nfrom google.cloud.firestore_v1beta1 import _helpers\n\n\nclass WriteBatch(object):\n \"\"\"Accumulate write operations to be sent in a batch.\n\n This has the same set of methods for write operations that\n :class:`~.firestore_v1beta1.document.DocumentReference` does,\n e.g. :meth:`~.firestore_v1beta1.document.DocumentReference.create`.\n\n Args:\n client (~.firestore_v1beta1.client.Client): The client that\n created this batch.\n \"\"\"\n\n def __init__(self, client):\n self._client = client\n self._write_pbs = []\n self.write_results = None\n self.commit_time = None\n\n def _add_write_pbs(self, write_pbs):\n \"\"\"Add `Write`` protobufs to this transaction.\n\n This method intended to be over-ridden by subclasses.\n\n Args:\n write_pbs (List[google.cloud.proto.firestore.v1beta1.\\\n write_pb2.Write]): A list of write protobufs to be added.\n \"\"\"\n self._write_pbs.extend(write_pbs)\n\n def create(self, reference, document_data):\n \"\"\"Add a \"change\" to this batch to create a document.\n\n If the document given by ``reference`` already exists, then this\n batch will fail when :meth:`commit`-ed.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference to be created in this batch.\n document_data (dict): Property names and values to use for\n creating a document.\n \"\"\"\n write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)\n self._add_write_pbs(write_pbs)\n\n def set(self, reference, document_data, merge=False):\n \"\"\"Add a \"change\" to replace a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.set` for\n more information on how ``option`` determines how the change is\n applied.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference):\n A document reference that will have values set in this batch.\n document_data (dict):\n Property names and values to use for replacing a document.\n merge (Optional[bool] or Optional[List<apispec>]):\n If True, apply merging instead of overwriting the state\n of the document.\n \"\"\"\n if merge is not False:\n write_pbs = 
_helpers.pbs_for_set_with_merge(\n reference._document_path, document_data, merge\n )\n else:\n write_pbs = _helpers.pbs_for_set_no_merge(\n reference._document_path, document_data\n )\n\n self._add_write_pbs(write_pbs)\n\n def update(self, reference, field_updates, option=None):\n \"\"\"Add a \"change\" to update a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.update` for\n more information on ``field_updates`` and ``option``.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference that will be deleted in this batch.\n field_updates (dict): Field names or paths to update and values\n to update with.\n option (Optional[~.firestore_v1beta1.client.WriteOption]): A\n write option to make assertions / preconditions on the server\n state of the document before applying changes.\n \"\"\"\n if option.__class__.__name__ == \"ExistsOption\":\n raise ValueError(\"you must not pass an explicit write option to \" \"update.\")\n write_pbs = _helpers.pbs_for_update(\n reference._document_path, field_updates, option\n )\n self._add_write_pbs(write_pbs)\n\n def delete(self, reference, option=None):\n \"\"\"Add a \"change\" to delete a document.\n\n See\n :meth:`~.firestore_v1beta1.document.DocumentReference.delete` for\n more information on how ``option`` determines how the change is\n applied.\n\n Args:\n reference (~.firestore_v1beta1.document.DocumentReference): A\n document reference that will be deleted in this batch.\n option (Optional[~.firestore_v1beta1.client.WriteOption]): A\n write option to make assertions / preconditions on the server\n state of the document before applying changes.\n \"\"\"\n write_pb = _helpers.pb_for_delete(reference._document_path, option)\n self._add_write_pbs([write_pb])\n\n def commit(self):\n \"\"\"Commit the changes accumulated in this batch.\n\n Returns:\n List[google.cloud.proto.firestore.v1beta1.\\\n write_pb2.WriteResult, ...]: The write results corresponding\n to the changes committed, returned in the same order as the\n changes were applied to this batch. A write result contains an\n ``update_time`` field.\n \"\"\"\n commit_response = self._client._firestore_api.commit(\n self._client._database_string,\n self._write_pbs,\n transaction=None,\n metadata=self._client._rpc_metadata,\n )\n\n self._write_pbs = []\n self.write_results = results = list(commit_response.write_results)\n self.commit_time = commit_response.commit_time\n return results\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is None:\n self.commit()\n", "path": "firestore/google/cloud/firestore_v1beta1/batch.py"}]}
| 1,902 | 250 |
gh_patches_debug_16952
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-3223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenMPI: Unknown UCC configure option
Trying to build OpenMPI 4.1.1, I get an error/warning that `--with-ucc` is not a known configure option.
The option was added to the easyblock in https://github.com/easybuilders/easybuild-easyblocks/pull/2847.
@SebastianAchilles Do you remember which OpenMPI version definitely supports this option, i.e. where you found it to be missing/required/supported?
We might need to add a version check there.
--- END ISSUE ---
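A minimal sketch of the version gate the issue suggests is shown below, using a plain-Python stand-in for the LooseVersion comparison the easyblock performs. The 4.1.4 cut-off mirrors the one the accepted patch further down settles on; otherwise it is an assumption for illustration.

```python
def _ver(v):
    # tiny stand-in for easybuild.tools.LooseVersion, good enough for X.Y.Z strings
    return tuple(int(p) for p in v.split('.'))


def known_dependencies(openmpi_version):
    deps = ['CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX']
    if _ver(openmpi_version) >= _ver('4.1.4'):
        # --with-ucc is only recognised by newer OpenMPI releases
        deps.append('UCC')
    return deps


print(known_dependencies('4.1.1'))  # no UCC, so --with-ucc is never emitted
print(known_dependencies('4.1.4'))  # UCC included
```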
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/o/openmpi.py`
Content:
```
1 ##
2 # Copyright 2019-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for OpenMPI, implemented as an easyblock
27
28 @author: Kenneth Hoste (Ghent University)
29 @author: Robert Mijakovic (LuxProvide)
30 """
31 import os
32 import re
33 from easybuild.tools import LooseVersion
34
35 import easybuild.tools.toolchain as toolchain
36 from easybuild.easyblocks.generic.configuremake import ConfigureMake
37 from easybuild.framework.easyconfig.constants import EASYCONFIG_CONSTANTS
38 from easybuild.tools.build_log import EasyBuildError
39 from easybuild.tools.config import build_option
40 from easybuild.tools.modules import get_software_root
41 from easybuild.tools.systemtools import check_os_dependency, get_shared_lib_ext
42 from easybuild.tools.toolchain.mpi import get_mpi_cmd_template
43
44
45 class EB_OpenMPI(ConfigureMake):
46 """OpenMPI easyblock."""
47
48 def configure_step(self):
49 """Custom configuration step for OpenMPI."""
50
51 def config_opt_used(key, enable_opt=False):
52 """Helper function to check whether a configure option is already specified in 'configopts'."""
53 if enable_opt:
54 regex = '--(disable|enable)-%s' % key
55 else:
56 regex = '--(with|without)-%s' % key
57
58 return bool(re.search(regex, self.cfg['configopts']))
59
60 config_opt_names = [
61 # suppress failure modes in relation to mpirun path
62 'mpirun-prefix-by-default',
63 # build shared libraries
64 'shared',
65 ]
66
67 for key in config_opt_names:
68 if not config_opt_used(key, enable_opt=True):
69 self.cfg.update('configopts', '--enable-%s' % key)
70
71 # List of EasyBuild dependencies for which OMPI has known options
72 known_dependencies = ('CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX', 'UCC')
73 # Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig
74 # No entry is interpreted as no option added at all
75 # This is to make builds reproducible even when the system libraries are changed and avoids failures
76 # due to e.g. finding only PMIx but not libevent on the system
77 unused_dep_value = dict()
78 # Known options since version 3.0 (no earlier ones checked)
79 if LooseVersion(self.version) >= LooseVersion('3.0'):
80 # Default to disable the option with "no"
81 unused_dep_value = {dep: 'no' for dep in known_dependencies}
82 # For these the default is to use an internal copy and not using any is not supported
83 for dep in ('hwloc', 'libevent', 'PMIx'):
84 unused_dep_value[dep] = 'internal'
85
86 # handle dependencies
87 for dep in known_dependencies:
88 opt_name = dep.lower()
89 # If the option is already used, don't add it
90 if config_opt_used(opt_name):
91 continue
92
93 # libfabric option renamed in OpenMPI 3.1.0 to ofi
94 if dep == 'libfabric' and LooseVersion(self.version) >= LooseVersion('3.1'):
95 opt_name = 'ofi'
96 # Check new option name. They are synonyms since 3.1.0 for backward compatibility
97 if config_opt_used(opt_name):
98 continue
99
100 dep_root = get_software_root(dep)
101 # If the dependency is loaded, specify its path, else use the "unused" value, if any
102 if dep_root:
103 opt_value = dep_root
104 else:
105 opt_value = unused_dep_value.get(dep)
106 if opt_value is not None:
107 self.cfg.update('configopts', '--with-%s=%s' % (opt_name, opt_value))
108
109 if bool(get_software_root('PMIx')) != bool(get_software_root('libevent')):
110 raise EasyBuildError('You must either use both PMIx and libevent as dependencies or none of them. '
111 'This is to enforce the same libevent is used for OpenMPI as for PMIx or '
112 'the behavior may be unpredictable.')
113
114 # check whether VERBS support should be enabled
115 if not config_opt_used('verbs'):
116
117 # for OpenMPI v4.x, the openib BTL should be disabled when UCX is used;
118 # this is required to avoid "error initializing an OpenFabrics device" warnings,
119 # see also https://www.open-mpi.org/faq/?category=all#ofa-device-error
120 is_ucx_enabled = ('--with-ucx' in self.cfg['configopts'] and
121 '--with-ucx=no' not in self.cfg['configopts'])
122 if LooseVersion(self.version) >= LooseVersion('4.0.0') and is_ucx_enabled:
123 verbs = False
124 else:
125 # auto-detect based on available OS packages
126 os_packages = EASYCONFIG_CONSTANTS['OS_PKG_IBVERBS_DEV'][0]
127 verbs = any(check_os_dependency(osdep) for osdep in os_packages)
128 # for OpenMPI v5.x, the verbs support is removed, only UCX is available
129 # see https://github.com/open-mpi/ompi/pull/6270
130 if LooseVersion(self.version) <= LooseVersion('5.0.0'):
131 if verbs:
132 self.cfg.update('configopts', '--with-verbs')
133 else:
134 self.cfg.update('configopts', '--without-verbs')
135
136 super(EB_OpenMPI, self).configure_step()
137
138 def test_step(self):
139 """Test step for OpenMPI"""
140 # Default to `make check` if nothing is set. Disable with "runtest = False" in the EC
141 if self.cfg['runtest'] is None:
142 self.cfg['runtest'] = 'check'
143
144 super(EB_OpenMPI, self).test_step()
145
146 def load_module(self, *args, **kwargs):
147 """
148 Load (temporary) module file, after resetting to initial environment.
149
150 Also put RPATH wrappers back in place if needed, to ensure that sanity check commands work as expected.
151 """
152 super(EB_OpenMPI, self).load_module(*args, **kwargs)
153
154 # ensure RPATH wrappers are in place, otherwise compiling minimal test programs will fail
155 if build_option('rpath'):
156 if self.toolchain.options.get('rpath', True):
157 self.toolchain.prepare_rpath_wrappers(rpath_filter_dirs=self.rpath_filter_dirs,
158 rpath_include_dirs=self.rpath_include_dirs)
159
160 def sanity_check_step(self):
161 """Custom sanity check for OpenMPI."""
162
163 bin_names = ['mpicc', 'mpicxx', 'mpif90', 'mpifort', 'mpirun', 'ompi_info', 'opal_wrapper']
164 if LooseVersion(self.version) >= LooseVersion('5.0.0'):
165 bin_names.append('prterun')
166 else:
167 bin_names.append('orterun')
168 bin_files = [os.path.join('bin', x) for x in bin_names]
169
170 shlib_ext = get_shared_lib_ext()
171 lib_names = ['mpi_mpifh', 'mpi', 'open-pal']
172 if LooseVersion(self.version) >= LooseVersion('5.0.0'):
173 lib_names.append('prrte')
174 else:
175 lib_names.extend(['ompitrace', 'open-rte'])
176 lib_files = [os.path.join('lib', 'lib%s.%s' % (x, shlib_ext)) for x in lib_names]
177
178 inc_names = ['mpi-ext', 'mpif-config', 'mpif', 'mpi', 'mpi_portable_platform']
179 if LooseVersion(self.version) >= LooseVersion('5.0.0'):
180 inc_names.append('prte')
181 inc_files = [os.path.join('include', x + '.h') for x in inc_names]
182
183 custom_paths = {
184 'files': bin_files + inc_files + lib_files,
185 'dirs': [],
186 }
187
188 # make sure MPI compiler wrappers pick up correct compilers
189 expected = {
190 'mpicc': os.getenv('CC', 'gcc'),
191 'mpicxx': os.getenv('CXX', 'g++'),
192 'mpifort': os.getenv('FC', 'gfortran'),
193 'mpif90': os.getenv('F90', 'gfortran'),
194 }
195 # actual pattern for gfortran is "GNU Fortran"
196 for key in ['mpifort', 'mpif90']:
197 if expected[key] == 'gfortran':
198 expected[key] = "GNU Fortran"
199 # for PGI, correct pattern is "pgfortran" with mpif90
200 if expected['mpif90'] == 'pgf90':
201 expected['mpif90'] = 'pgfortran'
202 # for Clang the pattern is always clang
203 for key in ['mpicxx', 'mpifort', 'mpif90']:
204 if expected[key] in ['clang++', 'flang']:
205 expected[key] = 'clang'
206
207 custom_commands = ["%s --version | grep '%s'" % (key, expected[key]) for key in sorted(expected.keys())]
208
209 # Add minimal test program to sanity checks
210 # Run with correct MPI launcher
211 mpi_cmd_tmpl, params = get_mpi_cmd_template(toolchain.OPENMPI, dict(), mpi_version=self.version)
212 # Limit number of ranks to 8 to avoid it failing due to hyperthreading
213 ranks = min(8, self.cfg['parallel'])
214 for srcdir, src, compiler in (
215 ('examples', 'hello_c.c', 'mpicc'),
216 ('examples', 'hello_mpifh.f', 'mpifort'),
217 ('examples', 'hello_usempi.f90', 'mpif90'),
218 ('examples', 'ring_c.c', 'mpicc'),
219 ('examples', 'ring_mpifh.f', 'mpifort'),
220 ('examples', 'ring_usempi.f90', 'mpif90'),
221 ('test/simple', 'thread_init.c', 'mpicc'),
222 ('test/simple', 'intercomm1.c', 'mpicc'),
223 ('test/simple', 'mpi_barrier.c', 'mpicc'),
224 ):
225 src_path = os.path.join(self.cfg['start_dir'], srcdir, src)
226 if os.path.exists(src_path):
227 test_exe = os.path.join(self.builddir, 'mpi_test_' + os.path.splitext(src)[0])
228 self.log.info("Adding minimal MPI test program to sanity checks: %s", test_exe)
229
230 # Build test binary
231 custom_commands.append("%s %s -o %s" % (compiler, src_path, test_exe))
232
233 # Run the test if chosen
234 if build_option('mpi_tests'):
235 params.update({'nr_ranks': ranks, 'cmd': test_exe})
236 # Allow oversubscription for this test (in case of hyperthreading)
237 custom_commands.append("OMPI_MCA_rmaps_base_oversubscribe=1 " + mpi_cmd_tmpl % params)
238 # Run with 1 process which may trigger other bugs
239 # See https://github.com/easybuilders/easybuild-easyconfigs/issues/12978
240 params['nr_ranks'] = 1
241 custom_commands.append(mpi_cmd_tmpl % params)
242
243 super(EB_OpenMPI, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/easyblocks/o/openmpi.py b/easybuild/easyblocks/o/openmpi.py
--- a/easybuild/easyblocks/o/openmpi.py
+++ b/easybuild/easyblocks/o/openmpi.py
@@ -69,7 +69,10 @@
self.cfg.update('configopts', '--enable-%s' % key)
# List of EasyBuild dependencies for which OMPI has known options
- known_dependencies = ('CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX', 'UCC')
+ known_dependencies = ['CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX']
+ if LooseVersion(self.version) >= '4.1.4':
+ known_dependencies.append('UCC')
+
# Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig
# No entry is interpreted as no option added at all
# This is to make builds reproducible even when the system libraries are changed and avoids failures
|
{"golden_diff": "diff --git a/easybuild/easyblocks/o/openmpi.py b/easybuild/easyblocks/o/openmpi.py\n--- a/easybuild/easyblocks/o/openmpi.py\n+++ b/easybuild/easyblocks/o/openmpi.py\n@@ -69,7 +69,10 @@\n self.cfg.update('configopts', '--enable-%s' % key)\n \n # List of EasyBuild dependencies for which OMPI has known options\n- known_dependencies = ('CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX', 'UCC')\n+ known_dependencies = ['CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX']\n+ if LooseVersion(self.version) >= '4.1.4':\n+ known_dependencies.append('UCC')\n+\n # Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig\n # No entry is interpreted as no option added at all\n # This is to make builds reproducible even when the system libraries are changed and avoids failures\n", "issue": "OpenMPI: Unknown UCC configure option\nTrying to build OpenMPI 4.1.1 I get the error/warning that `--with-ucc` is not a known configure option.\r\n\r\nIt was added in https://github.com/easybuilders/easybuild-easyblocks/pull/2847 \r\n\r\n@SebastianAchilles Do you remember which version has this for sure, i.e. where you found that to be missing/required/supported?\r\n\r\nWe might need to add a version check there.\n", "before_files": [{"content": "##\n# Copyright 2019-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for OpenMPI, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n@author: Robert Mijakovic (LuxProvide)\n\"\"\"\nimport os\nimport re\nfrom easybuild.tools import LooseVersion\n\nimport easybuild.tools.toolchain as toolchain\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig.constants import EASYCONFIG_CONSTANTS\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.modules import get_software_root\nfrom easybuild.tools.systemtools import check_os_dependency, get_shared_lib_ext\nfrom easybuild.tools.toolchain.mpi import get_mpi_cmd_template\n\n\nclass EB_OpenMPI(ConfigureMake):\n \"\"\"OpenMPI easyblock.\"\"\"\n\n def configure_step(self):\n \"\"\"Custom configuration step for OpenMPI.\"\"\"\n\n def config_opt_used(key, enable_opt=False):\n \"\"\"Helper function to check whether a configure option is already specified in 'configopts'.\"\"\"\n if enable_opt:\n regex = '--(disable|enable)-%s' % key\n else:\n regex = '--(with|without)-%s' % key\n\n return bool(re.search(regex, self.cfg['configopts']))\n\n config_opt_names = [\n # suppress failure modes in relation to mpirun path\n 'mpirun-prefix-by-default',\n # build shared libraries\n 'shared',\n ]\n\n for key in config_opt_names:\n if not config_opt_used(key, enable_opt=True):\n self.cfg.update('configopts', '--enable-%s' % key)\n\n # List of EasyBuild dependencies for which OMPI has known options\n known_dependencies = ('CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX', 'UCC')\n # Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig\n # No entry is interpreted as no option added at all\n # This is to make builds reproducible even when the system libraries are changed and avoids failures\n # due to e.g. finding only PMIx but not libevent on the system\n unused_dep_value = dict()\n # Known options since version 3.0 (no earlier ones checked)\n if LooseVersion(self.version) >= LooseVersion('3.0'):\n # Default to disable the option with \"no\"\n unused_dep_value = {dep: 'no' for dep in known_dependencies}\n # For these the default is to use an internal copy and not using any is not supported\n for dep in ('hwloc', 'libevent', 'PMIx'):\n unused_dep_value[dep] = 'internal'\n\n # handle dependencies\n for dep in known_dependencies:\n opt_name = dep.lower()\n # If the option is already used, don't add it\n if config_opt_used(opt_name):\n continue\n\n # libfabric option renamed in OpenMPI 3.1.0 to ofi\n if dep == 'libfabric' and LooseVersion(self.version) >= LooseVersion('3.1'):\n opt_name = 'ofi'\n # Check new option name. They are synonyms since 3.1.0 for backward compatibility\n if config_opt_used(opt_name):\n continue\n\n dep_root = get_software_root(dep)\n # If the dependency is loaded, specify its path, else use the \"unused\" value, if any\n if dep_root:\n opt_value = dep_root\n else:\n opt_value = unused_dep_value.get(dep)\n if opt_value is not None:\n self.cfg.update('configopts', '--with-%s=%s' % (opt_name, opt_value))\n\n if bool(get_software_root('PMIx')) != bool(get_software_root('libevent')):\n raise EasyBuildError('You must either use both PMIx and libevent as dependencies or none of them. 
'\n 'This is to enforce the same libevent is used for OpenMPI as for PMIx or '\n 'the behavior may be unpredictable.')\n\n # check whether VERBS support should be enabled\n if not config_opt_used('verbs'):\n\n # for OpenMPI v4.x, the openib BTL should be disabled when UCX is used;\n # this is required to avoid \"error initializing an OpenFabrics device\" warnings,\n # see also https://www.open-mpi.org/faq/?category=all#ofa-device-error\n is_ucx_enabled = ('--with-ucx' in self.cfg['configopts'] and\n '--with-ucx=no' not in self.cfg['configopts'])\n if LooseVersion(self.version) >= LooseVersion('4.0.0') and is_ucx_enabled:\n verbs = False\n else:\n # auto-detect based on available OS packages\n os_packages = EASYCONFIG_CONSTANTS['OS_PKG_IBVERBS_DEV'][0]\n verbs = any(check_os_dependency(osdep) for osdep in os_packages)\n # for OpenMPI v5.x, the verbs support is removed, only UCX is available\n # see https://github.com/open-mpi/ompi/pull/6270\n if LooseVersion(self.version) <= LooseVersion('5.0.0'):\n if verbs:\n self.cfg.update('configopts', '--with-verbs')\n else:\n self.cfg.update('configopts', '--without-verbs')\n\n super(EB_OpenMPI, self).configure_step()\n\n def test_step(self):\n \"\"\"Test step for OpenMPI\"\"\"\n # Default to `make check` if nothing is set. Disable with \"runtest = False\" in the EC\n if self.cfg['runtest'] is None:\n self.cfg['runtest'] = 'check'\n\n super(EB_OpenMPI, self).test_step()\n\n def load_module(self, *args, **kwargs):\n \"\"\"\n Load (temporary) module file, after resetting to initial environment.\n\n Also put RPATH wrappers back in place if needed, to ensure that sanity check commands work as expected.\n \"\"\"\n super(EB_OpenMPI, self).load_module(*args, **kwargs)\n\n # ensure RPATH wrappers are in place, otherwise compiling minimal test programs will fail\n if build_option('rpath'):\n if self.toolchain.options.get('rpath', True):\n self.toolchain.prepare_rpath_wrappers(rpath_filter_dirs=self.rpath_filter_dirs,\n rpath_include_dirs=self.rpath_include_dirs)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for OpenMPI.\"\"\"\n\n bin_names = ['mpicc', 'mpicxx', 'mpif90', 'mpifort', 'mpirun', 'ompi_info', 'opal_wrapper']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n bin_names.append('prterun')\n else:\n bin_names.append('orterun')\n bin_files = [os.path.join('bin', x) for x in bin_names]\n\n shlib_ext = get_shared_lib_ext()\n lib_names = ['mpi_mpifh', 'mpi', 'open-pal']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n lib_names.append('prrte')\n else:\n lib_names.extend(['ompitrace', 'open-rte'])\n lib_files = [os.path.join('lib', 'lib%s.%s' % (x, shlib_ext)) for x in lib_names]\n\n inc_names = ['mpi-ext', 'mpif-config', 'mpif', 'mpi', 'mpi_portable_platform']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n inc_names.append('prte')\n inc_files = [os.path.join('include', x + '.h') for x in inc_names]\n\n custom_paths = {\n 'files': bin_files + inc_files + lib_files,\n 'dirs': [],\n }\n\n # make sure MPI compiler wrappers pick up correct compilers\n expected = {\n 'mpicc': os.getenv('CC', 'gcc'),\n 'mpicxx': os.getenv('CXX', 'g++'),\n 'mpifort': os.getenv('FC', 'gfortran'),\n 'mpif90': os.getenv('F90', 'gfortran'),\n }\n # actual pattern for gfortran is \"GNU Fortran\"\n for key in ['mpifort', 'mpif90']:\n if expected[key] == 'gfortran':\n expected[key] = \"GNU Fortran\"\n # for PGI, correct pattern is \"pgfortran\" with mpif90\n if expected['mpif90'] == 'pgf90':\n expected['mpif90'] = 'pgfortran'\n # for Clang 
the pattern is always clang\n for key in ['mpicxx', 'mpifort', 'mpif90']:\n if expected[key] in ['clang++', 'flang']:\n expected[key] = 'clang'\n\n custom_commands = [\"%s --version | grep '%s'\" % (key, expected[key]) for key in sorted(expected.keys())]\n\n # Add minimal test program to sanity checks\n # Run with correct MPI launcher\n mpi_cmd_tmpl, params = get_mpi_cmd_template(toolchain.OPENMPI, dict(), mpi_version=self.version)\n # Limit number of ranks to 8 to avoid it failing due to hyperthreading\n ranks = min(8, self.cfg['parallel'])\n for srcdir, src, compiler in (\n ('examples', 'hello_c.c', 'mpicc'),\n ('examples', 'hello_mpifh.f', 'mpifort'),\n ('examples', 'hello_usempi.f90', 'mpif90'),\n ('examples', 'ring_c.c', 'mpicc'),\n ('examples', 'ring_mpifh.f', 'mpifort'),\n ('examples', 'ring_usempi.f90', 'mpif90'),\n ('test/simple', 'thread_init.c', 'mpicc'),\n ('test/simple', 'intercomm1.c', 'mpicc'),\n ('test/simple', 'mpi_barrier.c', 'mpicc'),\n ):\n src_path = os.path.join(self.cfg['start_dir'], srcdir, src)\n if os.path.exists(src_path):\n test_exe = os.path.join(self.builddir, 'mpi_test_' + os.path.splitext(src)[0])\n self.log.info(\"Adding minimal MPI test program to sanity checks: %s\", test_exe)\n\n # Build test binary\n custom_commands.append(\"%s %s -o %s\" % (compiler, src_path, test_exe))\n\n # Run the test if chosen\n if build_option('mpi_tests'):\n params.update({'nr_ranks': ranks, 'cmd': test_exe})\n # Allow oversubscription for this test (in case of hyperthreading)\n custom_commands.append(\"OMPI_MCA_rmaps_base_oversubscribe=1 \" + mpi_cmd_tmpl % params)\n # Run with 1 process which may trigger other bugs\n # See https://github.com/easybuilders/easybuild-easyconfigs/issues/12978\n params['nr_ranks'] = 1\n custom_commands.append(mpi_cmd_tmpl % params)\n\n super(EB_OpenMPI, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n", "path": "easybuild/easyblocks/o/openmpi.py"}], "after_files": [{"content": "##\n# Copyright 2019-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for OpenMPI, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n@author: Robert Mijakovic (LuxProvide)\n\"\"\"\nimport os\nimport re\nfrom easybuild.tools import LooseVersion\n\nimport easybuild.tools.toolchain as toolchain\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig.constants import EASYCONFIG_CONSTANTS\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.modules import get_software_root\nfrom easybuild.tools.systemtools import check_os_dependency, get_shared_lib_ext\nfrom easybuild.tools.toolchain.mpi import get_mpi_cmd_template\n\n\nclass EB_OpenMPI(ConfigureMake):\n \"\"\"OpenMPI easyblock.\"\"\"\n\n def configure_step(self):\n \"\"\"Custom configuration step for OpenMPI.\"\"\"\n\n def config_opt_used(key, enable_opt=False):\n \"\"\"Helper function to check whether a configure option is already specified in 'configopts'.\"\"\"\n if enable_opt:\n regex = '--(disable|enable)-%s' % key\n else:\n regex = '--(with|without)-%s' % key\n\n return bool(re.search(regex, self.cfg['configopts']))\n\n config_opt_names = [\n # suppress failure modes in relation to mpirun path\n 'mpirun-prefix-by-default',\n # build shared libraries\n 'shared',\n ]\n\n for key in config_opt_names:\n if not config_opt_used(key, enable_opt=True):\n self.cfg.update('configopts', '--enable-%s' % key)\n\n # List of EasyBuild dependencies for which OMPI has known options\n known_dependencies = ['CUDA', 'hwloc', 'libevent', 'libfabric', 'PMIx', 'UCX']\n if LooseVersion(self.version) >= '4.1.4':\n known_dependencies.append('UCC')\n\n # Value to use for `--with-<dep>=<value>` if the dependency is not specified in the easyconfig\n # No entry is interpreted as no option added at all\n # This is to make builds reproducible even when the system libraries are changed and avoids failures\n # due to e.g. finding only PMIx but not libevent on the system\n unused_dep_value = dict()\n # Known options since version 3.0 (no earlier ones checked)\n if LooseVersion(self.version) >= LooseVersion('3.0'):\n # Default to disable the option with \"no\"\n unused_dep_value = {dep: 'no' for dep in known_dependencies}\n # For these the default is to use an internal copy and not using any is not supported\n for dep in ('hwloc', 'libevent', 'PMIx'):\n unused_dep_value[dep] = 'internal'\n\n # handle dependencies\n for dep in known_dependencies:\n opt_name = dep.lower()\n # If the option is already used, don't add it\n if config_opt_used(opt_name):\n continue\n\n # libfabric option renamed in OpenMPI 3.1.0 to ofi\n if dep == 'libfabric' and LooseVersion(self.version) >= LooseVersion('3.1'):\n opt_name = 'ofi'\n # Check new option name. They are synonyms since 3.1.0 for backward compatibility\n if config_opt_used(opt_name):\n continue\n\n dep_root = get_software_root(dep)\n # If the dependency is loaded, specify its path, else use the \"unused\" value, if any\n if dep_root:\n opt_value = dep_root\n else:\n opt_value = unused_dep_value.get(dep)\n if opt_value is not None:\n self.cfg.update('configopts', '--with-%s=%s' % (opt_name, opt_value))\n\n if bool(get_software_root('PMIx')) != bool(get_software_root('libevent')):\n raise EasyBuildError('You must either use both PMIx and libevent as dependencies or none of them. 
'\n 'This is to enforce the same libevent is used for OpenMPI as for PMIx or '\n 'the behavior may be unpredictable.')\n\n # check whether VERBS support should be enabled\n if not config_opt_used('verbs'):\n\n # for OpenMPI v4.x, the openib BTL should be disabled when UCX is used;\n # this is required to avoid \"error initializing an OpenFabrics device\" warnings,\n # see also https://www.open-mpi.org/faq/?category=all#ofa-device-error\n is_ucx_enabled = ('--with-ucx' in self.cfg['configopts'] and\n '--with-ucx=no' not in self.cfg['configopts'])\n if LooseVersion(self.version) >= LooseVersion('4.0.0') and is_ucx_enabled:\n verbs = False\n else:\n # auto-detect based on available OS packages\n os_packages = EASYCONFIG_CONSTANTS['OS_PKG_IBVERBS_DEV'][0]\n verbs = any(check_os_dependency(osdep) for osdep in os_packages)\n # for OpenMPI v5.x, the verbs support is removed, only UCX is available\n # see https://github.com/open-mpi/ompi/pull/6270\n if LooseVersion(self.version) <= LooseVersion('5.0.0'):\n if verbs:\n self.cfg.update('configopts', '--with-verbs')\n else:\n self.cfg.update('configopts', '--without-verbs')\n\n super(EB_OpenMPI, self).configure_step()\n\n def test_step(self):\n \"\"\"Test step for OpenMPI\"\"\"\n # Default to `make check` if nothing is set. Disable with \"runtest = False\" in the EC\n if self.cfg['runtest'] is None:\n self.cfg['runtest'] = 'check'\n\n super(EB_OpenMPI, self).test_step()\n\n def load_module(self, *args, **kwargs):\n \"\"\"\n Load (temporary) module file, after resetting to initial environment.\n\n Also put RPATH wrappers back in place if needed, to ensure that sanity check commands work as expected.\n \"\"\"\n super(EB_OpenMPI, self).load_module(*args, **kwargs)\n\n # ensure RPATH wrappers are in place, otherwise compiling minimal test programs will fail\n if build_option('rpath'):\n if self.toolchain.options.get('rpath', True):\n self.toolchain.prepare_rpath_wrappers(rpath_filter_dirs=self.rpath_filter_dirs,\n rpath_include_dirs=self.rpath_include_dirs)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for OpenMPI.\"\"\"\n\n bin_names = ['mpicc', 'mpicxx', 'mpif90', 'mpifort', 'mpirun', 'ompi_info', 'opal_wrapper']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n bin_names.append('prterun')\n else:\n bin_names.append('orterun')\n bin_files = [os.path.join('bin', x) for x in bin_names]\n\n shlib_ext = get_shared_lib_ext()\n lib_names = ['mpi_mpifh', 'mpi', 'open-pal']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n lib_names.append('prrte')\n else:\n lib_names.extend(['ompitrace', 'open-rte'])\n lib_files = [os.path.join('lib', 'lib%s.%s' % (x, shlib_ext)) for x in lib_names]\n\n inc_names = ['mpi-ext', 'mpif-config', 'mpif', 'mpi', 'mpi_portable_platform']\n if LooseVersion(self.version) >= LooseVersion('5.0.0'):\n inc_names.append('prte')\n inc_files = [os.path.join('include', x + '.h') for x in inc_names]\n\n custom_paths = {\n 'files': bin_files + inc_files + lib_files,\n 'dirs': [],\n }\n\n # make sure MPI compiler wrappers pick up correct compilers\n expected = {\n 'mpicc': os.getenv('CC', 'gcc'),\n 'mpicxx': os.getenv('CXX', 'g++'),\n 'mpifort': os.getenv('FC', 'gfortran'),\n 'mpif90': os.getenv('F90', 'gfortran'),\n }\n # actual pattern for gfortran is \"GNU Fortran\"\n for key in ['mpifort', 'mpif90']:\n if expected[key] == 'gfortran':\n expected[key] = \"GNU Fortran\"\n # for PGI, correct pattern is \"pgfortran\" with mpif90\n if expected['mpif90'] == 'pgf90':\n expected['mpif90'] = 'pgfortran'\n # for Clang 
the pattern is always clang\n for key in ['mpicxx', 'mpifort', 'mpif90']:\n if expected[key] in ['clang++', 'flang']:\n expected[key] = 'clang'\n\n custom_commands = [\"%s --version | grep '%s'\" % (key, expected[key]) for key in sorted(expected.keys())]\n\n # Add minimal test program to sanity checks\n # Run with correct MPI launcher\n mpi_cmd_tmpl, params = get_mpi_cmd_template(toolchain.OPENMPI, dict(), mpi_version=self.version)\n # Limit number of ranks to 8 to avoid it failing due to hyperthreading\n ranks = min(8, self.cfg['parallel'])\n for srcdir, src, compiler in (\n ('examples', 'hello_c.c', 'mpicc'),\n ('examples', 'hello_mpifh.f', 'mpifort'),\n ('examples', 'hello_usempi.f90', 'mpif90'),\n ('examples', 'ring_c.c', 'mpicc'),\n ('examples', 'ring_mpifh.f', 'mpifort'),\n ('examples', 'ring_usempi.f90', 'mpif90'),\n ('test/simple', 'thread_init.c', 'mpicc'),\n ('test/simple', 'intercomm1.c', 'mpicc'),\n ('test/simple', 'mpi_barrier.c', 'mpicc'),\n ):\n src_path = os.path.join(self.cfg['start_dir'], srcdir, src)\n if os.path.exists(src_path):\n test_exe = os.path.join(self.builddir, 'mpi_test_' + os.path.splitext(src)[0])\n self.log.info(\"Adding minimal MPI test program to sanity checks: %s\", test_exe)\n\n # Build test binary\n custom_commands.append(\"%s %s -o %s\" % (compiler, src_path, test_exe))\n\n # Run the test if chosen\n if build_option('mpi_tests'):\n params.update({'nr_ranks': ranks, 'cmd': test_exe})\n # Allow oversubscription for this test (in case of hyperthreading)\n custom_commands.append(\"OMPI_MCA_rmaps_base_oversubscribe=1 \" + mpi_cmd_tmpl % params)\n # Run with 1 process which may trigger other bugs\n # See https://github.com/easybuilders/easybuild-easyconfigs/issues/12978\n params['nr_ranks'] = 1\n custom_commands.append(mpi_cmd_tmpl % params)\n\n super(EB_OpenMPI, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n", "path": "easybuild/easyblocks/o/openmpi.py"}]}
| 3,728 | 243 |
gh_patches_debug_17129
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-3400
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove copyright application
This was brought up in review but not handled. It doesn't look like we need it any longer, so it can probably be removed.
--- END ISSUE ---
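For reference, the settings-side change the issue asks for amounts to dropping the 'copyright' entry from the third-party block of INSTALLED_APPS in the settings file quoted below (it appears around line 80 of that file). A rough sketch of that block after the removal follows; the neighbouring entries are copied from the quoted file, and the list name is only illustrative.

```python
# third-party section of INSTALLED_APPS after the proposed removal;
# neighbouring entries are copied from the quoted settings/base.py
third_party_apps = [
    'linaro_django_pagination',
    'taggit',
    'guardian',
    'django_gravatar',
    'rest_framework',
    'corsheaders',
    # 'copyright' removed per this issue
    'textclassifier',
    'annoying',
    'django_extensions',
    'messages_extends',
]
```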
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/settings/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # pylint: disable=missing-docstring
3
4 from __future__ import (
5 absolute_import, division, print_function, unicode_literals)
6
7 import os
8
9 from readthedocs.core.settings import Settings
10
11 try:
12 import readthedocsext # noqa
13 ext = True
14 except ImportError:
15 ext = False
16
17
18 _ = gettext = lambda s: s
19
20
21 class CommunityBaseSettings(Settings):
22
23 """Community base settings, don't use this directly."""
24
25 # Django settings
26 SITE_ID = 1
27 ROOT_URLCONF = 'readthedocs.urls'
28 SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'
29 LOGIN_REDIRECT_URL = '/dashboard/'
30 FORCE_WWW = False
31 SECRET_KEY = 'replace-this-please' # noqa
32 ATOMIC_REQUESTS = True
33
34 # Debug settings
35 DEBUG = True
36 TEMPLATE_DEBUG = DEBUG
37 TASTYPIE_FULL_DEBUG = True
38
39 # Domains and URLs
40 PRODUCTION_DOMAIN = 'readthedocs.org'
41 PUBLIC_DOMAIN = None
42 USE_SUBDOMAIN = False
43 PUBLIC_API_URL = 'https://{0}'.format(PRODUCTION_DOMAIN)
44
45 ADMINS = (
46 ('Eric Holscher', '[email protected]'),
47 ('Anthony Johnson', '[email protected]'),
48 )
49 MANAGERS = ADMINS
50
51 # Email
52 DEFAULT_FROM_EMAIL = '[email protected]'
53 SERVER_EMAIL = DEFAULT_FROM_EMAIL
54
55 # Cookies
56 SESSION_COOKIE_DOMAIN = 'readthedocs.org'
57 SESSION_COOKIE_HTTPONLY = True
58 CSRF_COOKIE_HTTPONLY = True
59
60 # Application classes
61 @property
62 def INSTALLED_APPS(self): # noqa
63 apps = [
64 'django.contrib.auth',
65 'django.contrib.admin',
66 'django.contrib.contenttypes',
67 'django.contrib.sessions',
68 'django.contrib.sites',
69 'django.contrib.staticfiles',
70 'django.contrib.messages',
71 'django.contrib.humanize',
72
73 # third party apps
74 'linaro_django_pagination',
75 'taggit',
76 'guardian',
77 'django_gravatar',
78 'rest_framework',
79 'corsheaders',
80 'copyright',
81 'textclassifier',
82 'annoying',
83 'django_extensions',
84 'messages_extends',
85
86 # daniellindsleyrocksdahouse
87 'haystack',
88 'tastypie',
89
90 # our apps
91 'readthedocs.bookmarks',
92 'readthedocs.projects',
93 'readthedocs.builds',
94 'readthedocs.comments',
95 'readthedocs.core',
96 'readthedocs.doc_builder',
97 'readthedocs.oauth',
98 'readthedocs.redirects',
99 'readthedocs.rtd_tests',
100 'readthedocs.restapi',
101 'readthedocs.gold',
102 'readthedocs.payments',
103 'readthedocs.notifications',
104 'readthedocs.integrations',
105
106
107 # allauth
108 'allauth',
109 'allauth.account',
110 'allauth.socialaccount',
111 'allauth.socialaccount.providers.github',
112 'allauth.socialaccount.providers.gitlab',
113 'allauth.socialaccount.providers.bitbucket',
114 'allauth.socialaccount.providers.bitbucket_oauth2',
115 ]
116 if ext:
117 apps.append('django_countries')
118 apps.append('readthedocsext.donate')
119 apps.append('readthedocsext.embed')
120 return apps
121
122 TEMPLATE_LOADERS = (
123 'django.template.loaders.filesystem.Loader',
124 'django.template.loaders.app_directories.Loader',
125 )
126
127 MIDDLEWARE_CLASSES = (
128 'readthedocs.core.middleware.ProxyMiddleware',
129 'readthedocs.core.middleware.FooterNoSessionMiddleware',
130 'django.middleware.locale.LocaleMiddleware',
131 'django.middleware.common.CommonMiddleware',
132 'django.middleware.security.SecurityMiddleware',
133 'django.middleware.csrf.CsrfViewMiddleware',
134 'django.contrib.auth.middleware.AuthenticationMiddleware',
135 'django.contrib.messages.middleware.MessageMiddleware',
136 'linaro_django_pagination.middleware.PaginationMiddleware',
137 'readthedocs.core.middleware.SubdomainMiddleware',
138 'readthedocs.core.middleware.SingleVersionMiddleware',
139 'corsheaders.middleware.CorsMiddleware',
140 )
141
142 AUTHENTICATION_BACKENDS = (
143 # Needed to login by username in Django admin, regardless of `allauth`
144 'django.contrib.auth.backends.ModelBackend',
145 # `allauth` specific authentication methods, such as login by e-mail
146 'allauth.account.auth_backends.AuthenticationBackend',
147 )
148
149 TEMPLATE_CONTEXT_PROCESSORS = (
150 'django.contrib.auth.context_processors.auth',
151 'django.contrib.messages.context_processors.messages',
152 'django.core.context_processors.debug',
153 'django.core.context_processors.i18n',
154 'django.core.context_processors.media',
155 'django.core.context_processors.request',
156 # Read the Docs processor
157 'readthedocs.core.context_processors.readthedocs_processor',
158 )
159
160 MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'
161
162 NOTIFICATION_BACKENDS = [
163 'readthedocs.notifications.backends.EmailBackend',
164 'readthedocs.notifications.backends.SiteBackend',
165 ]
166
167 # Paths
168 SITE_ROOT = os.path.dirname(
169 os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
170 TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')
171 DOCROOT = os.path.join(SITE_ROOT, 'user_builds')
172 UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')
173 CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')
174 LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')
175 PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')
176 PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')
177
178 # Assets and media
179 STATIC_ROOT = os.path.join(SITE_ROOT, 'media/static/')
180 STATIC_URL = '/static/'
181 MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
182 MEDIA_URL = '/media/'
183 ADMIN_MEDIA_PREFIX = '/media/admin/'
184 STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'readthedocs', 'static')]
185 TEMPLATE_DIRS = (
186 TEMPLATE_ROOT,
187 )
188
189 # Cache
190 CACHES = {
191 'default': {
192 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
193 'PREFIX': 'docs',
194 }
195 }
196 CACHE_MIDDLEWARE_SECONDS = 60
197
198 # I18n
199 TIME_ZONE = 'America/Chicago'
200 LANGUAGE_CODE = 'en-us'
201 LANGUAGES = (
202 ('ca', gettext('Catalan')),
203 ('en', gettext('English')),
204 ('es', gettext('Spanish')),
205 ('pt-br', gettext('Brazilian Portuguese')),
206 ('nb', gettext('Norwegian Bokmål')),
207 ('fr', gettext('French')),
208 ('ru', gettext('Russian')),
209 ('de', gettext('German')),
210 ('gl', gettext('Galician')),
211 ('vi', gettext('Vietnamese')),
212 ('zh-cn', gettext('Chinese')),
213 ('zh-tw', gettext('Taiwanese')),
214 ('ja', gettext('Japanese')),
215 ('uk', gettext('Ukrainian')),
216 ('it', gettext('Italian')),
217 ('ko', gettext('Korean')),
218 )
219 LOCALE_PATHS = [
220 os.path.join(SITE_ROOT, 'readthedocs', 'locale'),
221 ]
222 USE_I18N = True
223 USE_L10N = True
224
225 # Celery
226 CELERY_ALWAYS_EAGER = True
227 CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes
228 CELERY_SEND_TASK_ERROR_EMAILS = False
229 CELERYD_HIJACK_ROOT_LOGGER = False
230 # Don't queue a bunch of tasks in the workers
231 CELERYD_PREFETCH_MULTIPLIER = 1
232 CELERY_CREATE_MISSING_QUEUES = True
233
234 CELERY_DEFAULT_QUEUE = 'celery'
235
236 # Docker
237 DOCKER_ENABLE = False
238 DOCKER_IMAGE = 'readthedocs/build:2.0'
239
240 # All auth
241 ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'
242 ACCOUNT_EMAIL_REQUIRED = True
243 ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
244 ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
245 ACCOUNT_ACTIVATION_DAYS = 7
246 SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
247 SOCIALACCOUNT_AUTO_SIGNUP = False
248 SOCIALACCOUNT_PROVIDERS = {
249 'github': {
250 'SCOPE': [
251 'user:email',
252 'read:org',
253 'admin:repo_hook',
254 'repo:status',
255 ],
256 },
257 'gitlab': {
258 'SCOPE': [
259 'api',
260 'read_user',
261 ],
262 },
263 }
264
265 # CORS
266 CORS_ORIGIN_REGEX_WHITELIST = (
267 '^http://(.+)\.readthedocs\.io$',
268 '^https://(.+)\.readthedocs\.io$'
269 )
270 # So people can post to their accounts
271 CORS_ALLOW_CREDENTIALS = True
272 CORS_ALLOW_HEADERS = (
273 'x-requested-with',
274 'content-type',
275 'accept',
276 'origin',
277 'authorization',
278 'x-csrftoken'
279 )
280
281 # RTD Settings
282 REPO_LOCK_SECONDS = 30
283 ALLOW_PRIVATE_REPOS = False
284 GROK_API_HOST = 'https://api.grokthedocs.com'
285 SERVE_DOCS = ['public']
286
287 # Haystack
288 HAYSTACK_CONNECTIONS = {
289 'default': {
290 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
291 },
292 }
293
294 # Elasticsearch settings.
295 ES_HOSTS = ['127.0.0.1:9200']
296 ES_DEFAULT_NUM_REPLICAS = 0
297 ES_DEFAULT_NUM_SHARDS = 5
298
299 ALLOWED_HOSTS = ['*']
300
301 ABSOLUTE_URL_OVERRIDES = {
302 'auth.user': lambda o: '/profiles/{}/'.format(o.username)
303 }
304
305 INTERNAL_IPS = ('127.0.0.1',)
306
307 # Guardian Settings
308 GUARDIAN_RAISE_403 = True
309 ANONYMOUS_USER_ID = -1
310
311 # Stripe
312 STRIPE_SECRET = None
313 STRIPE_PUBLISHABLE = None
314
315 # Misc application settings
316 GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'
317 GRAVATAR_DEFAULT_IMAGE = 'https://media.readthedocs.org/images/silhouette.png' # NOQA
318 COPY_START_YEAR = 2010
319 RESTRICTEDSESSIONS_AUTHED_ONLY = True
320 RESTRUCTUREDTEXT_FILTER_SETTINGS = {
321 'cloak_email_addresses': True,
322 'file_insertion_enabled': False,
323 'raw_enabled': False,
324 'strip_comments': True,
325 'doctitle_xform': True,
326 'sectsubtitle_xform': True,
327 'initial_header_level': 2,
328 'report_level': 5,
329 'syntax_highlight': 'none',
330 'math_output': 'latex',
331 'field_name_limit': 50,
332 }
333 REST_FRAMEWORK = {
334 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', # NOQA
335 'PAGE_SIZE': 10,
336 }
337 SILENCED_SYSTEM_CHECKS = ['fields.W342']
338
339 # Logging
340 LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
341 LOGGING = {
342 'version': 1,
343 'disable_existing_loggers': True,
344 'formatters': {
345 'default': {
346 'format': LOG_FORMAT,
347 'datefmt': '%d/%b/%Y %H:%M:%S',
348 },
349 },
350 'handlers': {
351 'console': {
352 'level': 'INFO',
353 'class': 'logging.StreamHandler',
354 'formatter': 'default'
355 },
356 'debug': {
357 'level': 'DEBUG',
358 'class': 'logging.handlers.RotatingFileHandler',
359 'filename': os.path.join(LOGS_ROOT, 'debug.log'),
360 'formatter': 'default',
361 },
362 },
363 'loggers': {
364 '': { # root logger
365 'handlers': ['debug', 'console'],
366 # Always send from the root, handlers can filter levels
367 'level': 'DEBUG',
368 },
369 'readthedocs': {
370 'handlers': ['debug', 'console'],
371 'level': 'DEBUG',
372 # Don't double log at the root logger for these.
373 'propagate': False,
374 },
375 },
376 }
377
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py
--- a/readthedocs/settings/base.py
+++ b/readthedocs/settings/base.py
@@ -77,7 +77,6 @@
'django_gravatar',
'rest_framework',
'corsheaders',
- 'copyright',
'textclassifier',
'annoying',
'django_extensions',
@@ -315,7 +314,6 @@
# Misc application settings
GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'
GRAVATAR_DEFAULT_IMAGE = 'https://media.readthedocs.org/images/silhouette.png' # NOQA
- COPY_START_YEAR = 2010
RESTRICTEDSESSIONS_AUTHED_ONLY = True
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
|
{"golden_diff": "diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py\n--- a/readthedocs/settings/base.py\n+++ b/readthedocs/settings/base.py\n@@ -77,7 +77,6 @@\n 'django_gravatar',\n 'rest_framework',\n 'corsheaders',\n- 'copyright',\n 'textclassifier',\n 'annoying',\n 'django_extensions',\n@@ -315,7 +314,6 @@\n # Misc application settings\n GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'\n GRAVATAR_DEFAULT_IMAGE = 'https://media.readthedocs.org/images/silhouette.png' # NOQA\n- COPY_START_YEAR = 2010\n RESTRICTEDSESSIONS_AUTHED_ONLY = True\n RESTRUCTUREDTEXT_FILTER_SETTINGS = {\n 'cloak_email_addresses': True,\n", "issue": "Remove copyright application\nThis was brought up in review and not handled. Doesn't look like we need it any longer?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# pylint: disable=missing-docstring\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport os\n\nfrom readthedocs.core.settings import Settings\n\ntry:\n import readthedocsext # noqa\n ext = True\nexcept ImportError:\n ext = False\n\n\n_ = gettext = lambda s: s\n\n\nclass CommunityBaseSettings(Settings):\n\n \"\"\"Community base settings, don't use this directly.\"\"\"\n\n # Django settings\n SITE_ID = 1\n ROOT_URLCONF = 'readthedocs.urls'\n SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'\n LOGIN_REDIRECT_URL = '/dashboard/'\n FORCE_WWW = False\n SECRET_KEY = 'replace-this-please' # noqa\n ATOMIC_REQUESTS = True\n\n # Debug settings\n DEBUG = True\n TEMPLATE_DEBUG = DEBUG\n TASTYPIE_FULL_DEBUG = True\n\n # Domains and URLs\n PRODUCTION_DOMAIN = 'readthedocs.org'\n PUBLIC_DOMAIN = None\n USE_SUBDOMAIN = False\n PUBLIC_API_URL = 'https://{0}'.format(PRODUCTION_DOMAIN)\n\n ADMINS = (\n ('Eric Holscher', '[email protected]'),\n ('Anthony Johnson', '[email protected]'),\n )\n MANAGERS = ADMINS\n\n # Email\n DEFAULT_FROM_EMAIL = '[email protected]'\n SERVER_EMAIL = DEFAULT_FROM_EMAIL\n\n # Cookies\n SESSION_COOKIE_DOMAIN = 'readthedocs.org'\n SESSION_COOKIE_HTTPONLY = True\n CSRF_COOKIE_HTTPONLY = True\n\n # Application classes\n @property\n def INSTALLED_APPS(self): # noqa\n apps = [\n 'django.contrib.auth',\n 'django.contrib.admin',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.messages',\n 'django.contrib.humanize',\n\n # third party apps\n 'linaro_django_pagination',\n 'taggit',\n 'guardian',\n 'django_gravatar',\n 'rest_framework',\n 'corsheaders',\n 'copyright',\n 'textclassifier',\n 'annoying',\n 'django_extensions',\n 'messages_extends',\n\n # daniellindsleyrocksdahouse\n 'haystack',\n 'tastypie',\n\n # our apps\n 'readthedocs.bookmarks',\n 'readthedocs.projects',\n 'readthedocs.builds',\n 'readthedocs.comments',\n 'readthedocs.core',\n 'readthedocs.doc_builder',\n 'readthedocs.oauth',\n 'readthedocs.redirects',\n 'readthedocs.rtd_tests',\n 'readthedocs.restapi',\n 'readthedocs.gold',\n 'readthedocs.payments',\n 'readthedocs.notifications',\n 'readthedocs.integrations',\n\n\n # allauth\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.gitlab',\n 'allauth.socialaccount.providers.bitbucket',\n 'allauth.socialaccount.providers.bitbucket_oauth2',\n ]\n if ext:\n apps.append('django_countries')\n apps.append('readthedocsext.donate')\n apps.append('readthedocsext.embed')\n return apps\n\n TEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 
'django.template.loaders.app_directories.Loader',\n )\n\n MIDDLEWARE_CLASSES = (\n 'readthedocs.core.middleware.ProxyMiddleware',\n 'readthedocs.core.middleware.FooterNoSessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'linaro_django_pagination.middleware.PaginationMiddleware',\n 'readthedocs.core.middleware.SubdomainMiddleware',\n 'readthedocs.core.middleware.SingleVersionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n )\n\n AUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n )\n\n TEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.request',\n # Read the Docs processor\n 'readthedocs.core.context_processors.readthedocs_processor',\n )\n\n MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'\n\n NOTIFICATION_BACKENDS = [\n 'readthedocs.notifications.backends.EmailBackend',\n 'readthedocs.notifications.backends.SiteBackend',\n ]\n\n # Paths\n SITE_ROOT = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')\n DOCROOT = os.path.join(SITE_ROOT, 'user_builds')\n UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')\n CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')\n LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')\n PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')\n PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')\n\n # Assets and media\n STATIC_ROOT = os.path.join(SITE_ROOT, 'media/static/')\n STATIC_URL = '/static/'\n MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')\n MEDIA_URL = '/media/'\n ADMIN_MEDIA_PREFIX = '/media/admin/'\n STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'readthedocs', 'static')]\n TEMPLATE_DIRS = (\n TEMPLATE_ROOT,\n )\n\n # Cache\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n 'PREFIX': 'docs',\n }\n }\n CACHE_MIDDLEWARE_SECONDS = 60\n\n # I18n\n TIME_ZONE = 'America/Chicago'\n LANGUAGE_CODE = 'en-us'\n LANGUAGES = (\n ('ca', gettext('Catalan')),\n ('en', gettext('English')),\n ('es', gettext('Spanish')),\n ('pt-br', gettext('Brazilian Portuguese')),\n ('nb', gettext('Norwegian Bokm\u00e5l')),\n ('fr', gettext('French')),\n ('ru', gettext('Russian')),\n ('de', gettext('German')),\n ('gl', gettext('Galician')),\n ('vi', gettext('Vietnamese')),\n ('zh-cn', gettext('Chinese')),\n ('zh-tw', gettext('Taiwanese')),\n ('ja', gettext('Japanese')),\n ('uk', gettext('Ukrainian')),\n ('it', gettext('Italian')),\n ('ko', gettext('Korean')),\n )\n LOCALE_PATHS = [\n os.path.join(SITE_ROOT, 'readthedocs', 'locale'),\n ]\n USE_I18N = True\n USE_L10N = True\n\n # Celery\n CELERY_ALWAYS_EAGER = True\n CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes\n CELERY_SEND_TASK_ERROR_EMAILS = False\n CELERYD_HIJACK_ROOT_LOGGER = False\n # Don't queue a bunch of tasks 
in the workers\n CELERYD_PREFETCH_MULTIPLIER = 1\n CELERY_CREATE_MISSING_QUEUES = True\n\n CELERY_DEFAULT_QUEUE = 'celery'\n\n # Docker\n DOCKER_ENABLE = False\n DOCKER_IMAGE = 'readthedocs/build:2.0'\n\n # All auth\n ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'\n ACCOUNT_EMAIL_REQUIRED = True\n ACCOUNT_EMAIL_VERIFICATION = 'mandatory'\n ACCOUNT_AUTHENTICATION_METHOD = 'username_email'\n ACCOUNT_ACTIVATION_DAYS = 7\n SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n SOCIALACCOUNT_AUTO_SIGNUP = False\n SOCIALACCOUNT_PROVIDERS = {\n 'github': {\n 'SCOPE': [\n 'user:email',\n 'read:org',\n 'admin:repo_hook',\n 'repo:status',\n ],\n },\n 'gitlab': {\n 'SCOPE': [\n 'api',\n 'read_user',\n ],\n },\n }\n\n # CORS\n CORS_ORIGIN_REGEX_WHITELIST = (\n '^http://(.+)\\.readthedocs\\.io$',\n '^https://(.+)\\.readthedocs\\.io$'\n )\n # So people can post to their accounts\n CORS_ALLOW_CREDENTIALS = True\n CORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken'\n )\n\n # RTD Settings\n REPO_LOCK_SECONDS = 30\n ALLOW_PRIVATE_REPOS = False\n GROK_API_HOST = 'https://api.grokthedocs.com'\n SERVE_DOCS = ['public']\n\n # Haystack\n HAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n },\n }\n\n # Elasticsearch settings.\n ES_HOSTS = ['127.0.0.1:9200']\n ES_DEFAULT_NUM_REPLICAS = 0\n ES_DEFAULT_NUM_SHARDS = 5\n\n ALLOWED_HOSTS = ['*']\n\n ABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda o: '/profiles/{}/'.format(o.username)\n }\n\n INTERNAL_IPS = ('127.0.0.1',)\n\n # Guardian Settings\n GUARDIAN_RAISE_403 = True\n ANONYMOUS_USER_ID = -1\n\n # Stripe\n STRIPE_SECRET = None\n STRIPE_PUBLISHABLE = None\n\n # Misc application settings\n GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'\n GRAVATAR_DEFAULT_IMAGE = 'https://media.readthedocs.org/images/silhouette.png' # NOQA\n COPY_START_YEAR = 2010\n RESTRICTEDSESSIONS_AUTHED_ONLY = True\n RESTRUCTUREDTEXT_FILTER_SETTINGS = {\n 'cloak_email_addresses': True,\n 'file_insertion_enabled': False,\n 'raw_enabled': False,\n 'strip_comments': True,\n 'doctitle_xform': True,\n 'sectsubtitle_xform': True,\n 'initial_header_level': 2,\n 'report_level': 5,\n 'syntax_highlight': 'none',\n 'math_output': 'latex',\n 'field_name_limit': 50,\n }\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', # NOQA\n 'PAGE_SIZE': 10,\n }\n SILENCED_SYSTEM_CHECKS = ['fields.W342']\n\n # Logging\n LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'default': {\n 'format': LOG_FORMAT,\n 'datefmt': '%d/%b/%Y %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'debug': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOGS_ROOT, 'debug.log'),\n 'formatter': 'default',\n },\n },\n 'loggers': {\n '': { # root logger\n 'handlers': ['debug', 'console'],\n # Always send from the root, handlers can filter levels\n 'level': 'DEBUG',\n },\n 'readthedocs': {\n 'handlers': ['debug', 'console'],\n 'level': 'DEBUG',\n # Don't double log at the root logger for these.\n 'propagate': False,\n },\n },\n }\n", "path": "readthedocs/settings/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# pylint: disable=missing-docstring\n\nfrom __future__ import (\n absolute_import, division, 
print_function, unicode_literals)\n\nimport os\n\nfrom readthedocs.core.settings import Settings\n\ntry:\n import readthedocsext # noqa\n ext = True\nexcept ImportError:\n ext = False\n\n\n_ = gettext = lambda s: s\n\n\nclass CommunityBaseSettings(Settings):\n\n \"\"\"Community base settings, don't use this directly.\"\"\"\n\n # Django settings\n SITE_ID = 1\n ROOT_URLCONF = 'readthedocs.urls'\n SUBDOMAIN_URLCONF = 'readthedocs.core.urls.subdomain'\n LOGIN_REDIRECT_URL = '/dashboard/'\n FORCE_WWW = False\n SECRET_KEY = 'replace-this-please' # noqa\n ATOMIC_REQUESTS = True\n\n # Debug settings\n DEBUG = True\n TEMPLATE_DEBUG = DEBUG\n TASTYPIE_FULL_DEBUG = True\n\n # Domains and URLs\n PRODUCTION_DOMAIN = 'readthedocs.org'\n PUBLIC_DOMAIN = None\n USE_SUBDOMAIN = False\n PUBLIC_API_URL = 'https://{0}'.format(PRODUCTION_DOMAIN)\n\n ADMINS = (\n ('Eric Holscher', '[email protected]'),\n ('Anthony Johnson', '[email protected]'),\n )\n MANAGERS = ADMINS\n\n # Email\n DEFAULT_FROM_EMAIL = '[email protected]'\n SERVER_EMAIL = DEFAULT_FROM_EMAIL\n\n # Cookies\n SESSION_COOKIE_DOMAIN = 'readthedocs.org'\n SESSION_COOKIE_HTTPONLY = True\n CSRF_COOKIE_HTTPONLY = True\n\n # Application classes\n @property\n def INSTALLED_APPS(self): # noqa\n apps = [\n 'django.contrib.auth',\n 'django.contrib.admin',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.messages',\n 'django.contrib.humanize',\n\n # third party apps\n 'linaro_django_pagination',\n 'taggit',\n 'guardian',\n 'django_gravatar',\n 'rest_framework',\n 'corsheaders',\n 'textclassifier',\n 'annoying',\n 'django_extensions',\n 'messages_extends',\n\n # daniellindsleyrocksdahouse\n 'haystack',\n 'tastypie',\n\n # our apps\n 'readthedocs.bookmarks',\n 'readthedocs.projects',\n 'readthedocs.builds',\n 'readthedocs.comments',\n 'readthedocs.core',\n 'readthedocs.doc_builder',\n 'readthedocs.oauth',\n 'readthedocs.redirects',\n 'readthedocs.rtd_tests',\n 'readthedocs.restapi',\n 'readthedocs.gold',\n 'readthedocs.payments',\n 'readthedocs.notifications',\n 'readthedocs.integrations',\n\n\n # allauth\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.github',\n 'allauth.socialaccount.providers.gitlab',\n 'allauth.socialaccount.providers.bitbucket',\n 'allauth.socialaccount.providers.bitbucket_oauth2',\n ]\n if ext:\n apps.append('django_countries')\n apps.append('readthedocsext.donate')\n apps.append('readthedocsext.embed')\n return apps\n\n TEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )\n\n MIDDLEWARE_CLASSES = (\n 'readthedocs.core.middleware.ProxyMiddleware',\n 'readthedocs.core.middleware.FooterNoSessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'linaro_django_pagination.middleware.PaginationMiddleware',\n 'readthedocs.core.middleware.SubdomainMiddleware',\n 'readthedocs.core.middleware.SingleVersionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n )\n\n AUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as 
login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n )\n\n TEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.request',\n # Read the Docs processor\n 'readthedocs.core.context_processors.readthedocs_processor',\n )\n\n MESSAGE_STORAGE = 'readthedocs.notifications.storages.FallbackUniqueStorage'\n\n NOTIFICATION_BACKENDS = [\n 'readthedocs.notifications.backends.EmailBackend',\n 'readthedocs.notifications.backends.SiteBackend',\n ]\n\n # Paths\n SITE_ROOT = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')\n DOCROOT = os.path.join(SITE_ROOT, 'user_builds')\n UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')\n CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')\n LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')\n PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')\n PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')\n\n # Assets and media\n STATIC_ROOT = os.path.join(SITE_ROOT, 'media/static/')\n STATIC_URL = '/static/'\n MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')\n MEDIA_URL = '/media/'\n ADMIN_MEDIA_PREFIX = '/media/admin/'\n STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'readthedocs', 'static')]\n TEMPLATE_DIRS = (\n TEMPLATE_ROOT,\n )\n\n # Cache\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n 'PREFIX': 'docs',\n }\n }\n CACHE_MIDDLEWARE_SECONDS = 60\n\n # I18n\n TIME_ZONE = 'America/Chicago'\n LANGUAGE_CODE = 'en-us'\n LANGUAGES = (\n ('ca', gettext('Catalan')),\n ('en', gettext('English')),\n ('es', gettext('Spanish')),\n ('pt-br', gettext('Brazilian Portuguese')),\n ('nb', gettext('Norwegian Bokm\u00e5l')),\n ('fr', gettext('French')),\n ('ru', gettext('Russian')),\n ('de', gettext('German')),\n ('gl', gettext('Galician')),\n ('vi', gettext('Vietnamese')),\n ('zh-cn', gettext('Chinese')),\n ('zh-tw', gettext('Taiwanese')),\n ('ja', gettext('Japanese')),\n ('uk', gettext('Ukrainian')),\n ('it', gettext('Italian')),\n ('ko', gettext('Korean')),\n )\n LOCALE_PATHS = [\n os.path.join(SITE_ROOT, 'readthedocs', 'locale'),\n ]\n USE_I18N = True\n USE_L10N = True\n\n # Celery\n CELERY_ALWAYS_EAGER = True\n CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes\n CELERY_SEND_TASK_ERROR_EMAILS = False\n CELERYD_HIJACK_ROOT_LOGGER = False\n # Don't queue a bunch of tasks in the workers\n CELERYD_PREFETCH_MULTIPLIER = 1\n CELERY_CREATE_MISSING_QUEUES = True\n\n CELERY_DEFAULT_QUEUE = 'celery'\n\n # Docker\n DOCKER_ENABLE = False\n DOCKER_IMAGE = 'readthedocs/build:2.0'\n\n # All auth\n ACCOUNT_ADAPTER = 'readthedocs.core.adapters.AccountAdapter'\n ACCOUNT_EMAIL_REQUIRED = True\n ACCOUNT_EMAIL_VERIFICATION = 'mandatory'\n ACCOUNT_AUTHENTICATION_METHOD = 'username_email'\n ACCOUNT_ACTIVATION_DAYS = 7\n SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n SOCIALACCOUNT_AUTO_SIGNUP = False\n SOCIALACCOUNT_PROVIDERS = {\n 'github': {\n 'SCOPE': [\n 'user:email',\n 'read:org',\n 'admin:repo_hook',\n 'repo:status',\n ],\n },\n 'gitlab': {\n 'SCOPE': [\n 'api',\n 'read_user',\n ],\n },\n }\n\n # CORS\n CORS_ORIGIN_REGEX_WHITELIST = (\n '^http://(.+)\\.readthedocs\\.io$',\n '^https://(.+)\\.readthedocs\\.io$'\n )\n # So people can post to their accounts\n CORS_ALLOW_CREDENTIALS = True\n 
CORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken'\n )\n\n # RTD Settings\n REPO_LOCK_SECONDS = 30\n ALLOW_PRIVATE_REPOS = False\n GROK_API_HOST = 'https://api.grokthedocs.com'\n SERVE_DOCS = ['public']\n\n # Haystack\n HAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',\n },\n }\n\n # Elasticsearch settings.\n ES_HOSTS = ['127.0.0.1:9200']\n ES_DEFAULT_NUM_REPLICAS = 0\n ES_DEFAULT_NUM_SHARDS = 5\n\n ALLOWED_HOSTS = ['*']\n\n ABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda o: '/profiles/{}/'.format(o.username)\n }\n\n INTERNAL_IPS = ('127.0.0.1',)\n\n # Guardian Settings\n GUARDIAN_RAISE_403 = True\n ANONYMOUS_USER_ID = -1\n\n # Stripe\n STRIPE_SECRET = None\n STRIPE_PUBLISHABLE = None\n\n # Misc application settings\n GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'\n GRAVATAR_DEFAULT_IMAGE = 'https://media.readthedocs.org/images/silhouette.png' # NOQA\n RESTRICTEDSESSIONS_AUTHED_ONLY = True\n RESTRUCTUREDTEXT_FILTER_SETTINGS = {\n 'cloak_email_addresses': True,\n 'file_insertion_enabled': False,\n 'raw_enabled': False,\n 'strip_comments': True,\n 'doctitle_xform': True,\n 'sectsubtitle_xform': True,\n 'initial_header_level': 2,\n 'report_level': 5,\n 'syntax_highlight': 'none',\n 'math_output': 'latex',\n 'field_name_limit': 50,\n }\n REST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', # NOQA\n 'PAGE_SIZE': 10,\n }\n SILENCED_SYSTEM_CHECKS = ['fields.W342']\n\n # Logging\n LOG_FORMAT = '%(name)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'\n LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'default': {\n 'format': LOG_FORMAT,\n 'datefmt': '%d/%b/%Y %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'debug': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(LOGS_ROOT, 'debug.log'),\n 'formatter': 'default',\n },\n },\n 'loggers': {\n '': { # root logger\n 'handlers': ['debug', 'console'],\n # Always send from the root, handlers can filter levels\n 'level': 'DEBUG',\n },\n 'readthedocs': {\n 'handlers': ['debug', 'console'],\n 'level': 'DEBUG',\n # Don't double log at the root logger for these.\n 'propagate': False,\n },\n },\n }\n", "path": "readthedocs/settings/base.py"}]}
| 4,065 | 193 |
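The patch in the record above removes the third-party `copyright` application from `INSTALLED_APPS` together with its companion `COPY_START_YEAR` setting. A minimal sketch of a regression check for that removal, assuming a Django environment configured with the patched settings (the check itself is illustrative and not part of the record):

```python
# Hypothetical sanity check run under the patched settings; both names were
# removed by the golden diff above, so neither should be visible any longer.
from django.conf import settings

assert 'copyright' not in settings.INSTALLED_APPS
assert not hasattr(settings, 'COPY_START_YEAR')
```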
gh_patches_debug_16929
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-306
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix content app not showing file size for 0 byte files
fixes: #5100
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 requirements = [
7 'coreapi~=2.3.3',
8 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to
9 'django-filter~=2.2.0',
10 'djangorestframework~=3.10.2',
11 'djangorestframework-queryfields~=1.0.0',
12 'drf-nested-routers~=0.91.0',
13 'drf-yasg~=1.16.1',
14 'gunicorn~=19.9.0',
15 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
16 'PyYAML~=5.1.1',
17 'rq~=1.1.0',
18 'redis~=3.1.0',
19 'setuptools>=41.0.1,<41.3.0',
20 'dynaconf~=2.1.0',
21 'whitenoise~=4.1.3',
22 ]
23
24 setup(
25 name='pulpcore',
26 version='3.0.0rc6.dev',
27 description='Pulp Django Application and Related Modules',
28 long_description=long_description,
29 long_description_content_type="text/markdown",
30 license='GPLv2+',
31 packages=find_packages(exclude=['test']),
32 author='Pulp Team',
33 author_email='[email protected]',
34 url='http://www.pulpproject.org',
35 python_requires='>=3.6',
36 install_requires=requirements,
37 extras_require={
38 'postgres': ['psycopg2-binary'],
39 'mysql': ['mysqlclient']
40 },
41 include_package_data=True,
42 classifiers=(
43 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
44 'Operating System :: POSIX :: Linux',
45 'Development Status :: 4 - Beta',
46 'Framework :: Django',
47 'Programming Language :: Python',
48 'Programming Language :: Python :: 3',
49 'Programming Language :: Python :: 3.6',
50 'Programming Language :: Python :: 3.7',
51 ),
52 scripts=['bin/pulp-content'],
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,7 @@
'drf-yasg~=1.16.1',
'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
+ 'psycopg2-binary',
'PyYAML~=5.1.1',
'rq~=1.1.0',
'redis~=3.1.0',
@@ -34,10 +35,6 @@
url='http://www.pulpproject.org',
python_requires='>=3.6',
install_requires=requirements,
- extras_require={
- 'postgres': ['psycopg2-binary'],
- 'mysql': ['mysqlclient']
- },
include_package_data=True,
classifiers=(
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,6 +13,7 @@\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n+ 'psycopg2-binary',\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n@@ -34,10 +35,6 @@\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n- extras_require={\n- 'postgres': ['psycopg2-binary'],\n- 'mysql': ['mysqlclient']\n- },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n", "issue": "Fix content app not showing file size for 0 byte files\nfixes: #5100\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi~=2.3.3',\n 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n 'django-filter~=2.2.0',\n 'djangorestframework~=3.10.2',\n 'djangorestframework-queryfields~=1.0.0',\n 'drf-nested-routers~=0.91.0',\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n 'setuptools>=41.0.1,<41.3.0',\n 'dynaconf~=2.1.0',\n 'whitenoise~=4.1.3',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc6.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi~=2.3.3',\n 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n 'django-filter~=2.2.0',\n 'djangorestframework~=3.10.2',\n 'djangorestframework-queryfields~=1.0.0',\n 'drf-nested-routers~=0.91.0',\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'psycopg2-binary',\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n 'setuptools>=41.0.1,<41.3.0',\n 'dynaconf~=2.1.0',\n 'whitenoise~=4.1.3',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc6.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public 
License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}]}
| 891 | 241 |
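The golden diff in this record promotes `psycopg2-binary` from the optional `postgres` extra to a hard requirement and drops the `extras_require` block (including the `mysql` extra) entirely. A small sketch of how that could be verified against installed package metadata, assuming `pulpcore` has been installed from the patched `setup.py` (for example with `pip install -e .`); the check is illustrative, not part of the record:

```python
# Inspect the declared dependencies of the installed pulpcore distribution.
import pkg_resources

dist = pkg_resources.get_distribution('pulpcore')
required = {req.project_name for req in dist.requires()}

assert 'psycopg2-binary' in required       # now a hard dependency
assert 'postgres' not in dist.extras       # the old extra is gone
assert 'mysql' not in dist.extras          # and so is the MySQL extra
```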
gh_patches_debug_23526
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-3589
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sy.grid.register() should print useful information
**Is your feature request related to a problem? Please describe.**
When registering a node on OpenGrid, we want to convey some information to the user using sys.stdout.write()
A few things we thought to add.
- Information: connecting to opengrid...etc.
- Information: Can I connect to the main grid node... graceful error message if you can't.
- Disclaimer: OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.
- Where to get Help:
- Join our slack (slack.openmined.org) and ask for help in the #lib_syft channel.
- File a Github Issue: https://github.com/OpenMined/PySyft and add the string "#opengrid" in the issue title.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/grid/__init__.py`
Content:
```
1 from .network import Network
2 import uuid
3
4 DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
5
6
7 def register(**kwargs):
8 """ Add this process as a new peer registering it in the grid network.
9
10 Returns:
11 peer: Peer Network instance.
12 """
13 if not kwargs:
14 args = args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL}
15 else:
16 args = kwargs
17
18 peer_id = str(uuid.uuid4())
19 peer = Network(peer_id, **args)
20 peer.start()
21
22 return peer
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py
--- a/syft/grid/__init__.py
+++ b/syft/grid/__init__.py
@@ -1,4 +1,5 @@
from .network import Network
+import sys
import uuid
DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
@@ -16,7 +17,32 @@
args = kwargs
peer_id = str(uuid.uuid4())
+ sys.stdout.write(
+ "Connecting to OpenGrid (" + "\033[94m" + DEFAULT_NETWORK_URL + "\033[0m" + ") ... "
+ )
peer = Network(peer_id, **args)
+
+ sys.stdout.write("\033[92m" + "OK" + "\033[0m" + "\n")
+ sys.stdout.write("Peer ID: " + peer_id + "\n")
+
+ sys.stdout.write(
+ "\033[93m" + "DISCLAIMER" + "\033[0m"
+ ":"
+ + "\033[1m"
+ + " OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\n"
+ + "\033[0m"
+ )
+
+ sys.stdout.write("Where to get help: \n")
+ sys.stdout.write(
+ " - Join our slack (https://slack.openmined.org) and ask for help in the #lib_syft channel.\n"
+ )
+ sys.stdout.write(
+ " - File a Github Issue: https://github.com/OpenMined/PySyft and add the string '#opengrid' in the issue title.\n"
+ )
+ sys.stdout.write(
+ " - Want to join in our development team? Apply here: https://forms.gle/wcH1vxzvPyDSbSVW6\n"
+ )
peer.start()
return peer
|
{"golden_diff": "diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py\n--- a/syft/grid/__init__.py\n+++ b/syft/grid/__init__.py\n@@ -1,4 +1,5 @@\n from .network import Network\n+import sys\n import uuid\n \n DEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n@@ -16,7 +17,32 @@\n args = kwargs\n \n peer_id = str(uuid.uuid4())\n+ sys.stdout.write(\n+ \"Connecting to OpenGrid (\" + \"\\033[94m\" + DEFAULT_NETWORK_URL + \"\\033[0m\" + \") ... \"\n+ )\n peer = Network(peer_id, **args)\n+\n+ sys.stdout.write(\"\\033[92m\" + \"OK\" + \"\\033[0m\" + \"\\n\")\n+ sys.stdout.write(\"Peer ID: \" + peer_id + \"\\n\")\n+\n+ sys.stdout.write(\n+ \"\\033[93m\" + \"DISCLAIMER\" + \"\\033[0m\"\n+ \":\"\n+ + \"\\033[1m\"\n+ + \" OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\\n\"\n+ + \"\\033[0m\"\n+ )\n+\n+ sys.stdout.write(\"Where to get help: \\n\")\n+ sys.stdout.write(\n+ \" - Join our slack (https://slack.openmined.org) and ask for help in the #lib_syft channel.\\n\"\n+ )\n+ sys.stdout.write(\n+ \" - File a Github Issue: https://github.com/OpenMined/PySyft and add the string '#opengrid' in the issue title.\\n\"\n+ )\n+ sys.stdout.write(\n+ \" - Want to join in our development team? Apply here: https://forms.gle/wcH1vxzvPyDSbSVW6\\n\"\n+ )\n peer.start()\n \n return peer\n", "issue": "sy.grid.register() should print useful information\n**Is your feature request related to a problem? Please describe.**\r\nWhen registering a node on OpenGrid, we want to convey some information to the user using sys.stdout.write()\r\n\r\nA few things we thought to add.\r\n\r\n- Information: connecting to opengrid...etc.\r\n - Information: Can I connect to the main grid node... graceful error message if you can't.\r\n- Disclaimer: OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\r\n- Where to get Help:\r\n - Join our slack (slack.openmined.org) and ask for help in the #lib_syft channel.\r\n - File a Github Issue: https://github.com/OpenMined/PySyft and add the string \"#opengrid\" in the issue title.\r\n \r\n\n", "before_files": [{"content": "from .network import Network\nimport uuid\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer_id = str(uuid.uuid4())\n peer = Network(peer_id, **args)\n peer.start()\n\n return peer\n", "path": "syft/grid/__init__.py"}], "after_files": [{"content": "from .network import Network\nimport sys\nimport uuid\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer_id = str(uuid.uuid4())\n sys.stdout.write(\n \"Connecting to OpenGrid (\" + \"\\033[94m\" + DEFAULT_NETWORK_URL + \"\\033[0m\" + \") ... 
\"\n )\n peer = Network(peer_id, **args)\n\n sys.stdout.write(\"\\033[92m\" + \"OK\" + \"\\033[0m\" + \"\\n\")\n sys.stdout.write(\"Peer ID: \" + peer_id + \"\\n\")\n\n sys.stdout.write(\n \"\\033[93m\" + \"DISCLAIMER\" + \"\\033[0m\"\n \":\"\n + \"\\033[1m\"\n + \" OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\\n\"\n + \"\\033[0m\"\n )\n\n sys.stdout.write(\"Where to get help: \\n\")\n sys.stdout.write(\n \" - Join our slack (https://slack.openmined.org) and ask for help in the #lib_syft channel.\\n\"\n )\n sys.stdout.write(\n \" - File a Github Issue: https://github.com/OpenMined/PySyft and add the string '#opengrid' in the issue title.\\n\"\n )\n sys.stdout.write(\n \" - Want to join in our development team? Apply here: https://forms.gle/wcH1vxzvPyDSbSVW6\\n\"\n )\n peer.start()\n\n return peer\n", "path": "syft/grid/__init__.py"}]}
| 615 | 468 |
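The patched `register()` above now prints a connection banner, the peer id, a disclaimer and help links via `sys.stdout.write()` before starting the peer. A minimal usage sketch, assuming network access to a grid node; the keyword arguments mirror the defaults shown in `syft/grid/__init__.py`, and the local URL in the second call is only an example:

```python
import syft as sy

# With no arguments the defaults apply (max_size=None, timeout=444,
# url=DEFAULT_NETWORK_URL) and the informational banner is printed.
peer = sy.grid.register()

# Passing any keyword arguments replaces the defaults entirely, so all of
# them must be supplied explicitly.
peer = sy.grid.register(url="ws://localhost:3000", timeout=444, max_size=None)
```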
gh_patches_debug_13740
|
rasdani/github-patches
|
git_diff
|
TOMToolkit__tom_base-580
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
not having a LASAIR token in Settings.py breaks
Fail more gracefully if no LASAIR token in settings.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tom_alerts/brokers/lasair.py`
Content:
```
1 import requests
2 from urllib.parse import urlencode
3
4 from crispy_forms.layout import HTML, Layout, Div, Fieldset, Row, Column
5 from django import forms
6 from django.conf import settings
7
8 from tom_alerts.alerts import GenericQueryForm, GenericAlert, GenericBroker
9 from tom_targets.models import Target
10
11 LASAIR_URL = 'https://lasair-ztf.lsst.ac.uk'
12
13
14 class LasairBrokerForm(GenericQueryForm):
15 cone_ra = forms.CharField(required=False, label='RA', help_text='Object RA (Decimal Degrees)',
16 widget=forms.TextInput(attrs={'placeholder': '1.2345'}))
17 cone_dec = forms.CharField(required=False, label='Dec', help_text='Object Dec (Decimal Degrees)',
18 widget=forms.TextInput(attrs={'placeholder': '1.2345'}))
19 cone_radius = forms.CharField(required=False, label='Radius', help_text='Search Radius (Arcsec)', initial='10',
20 widget=forms.TextInput(attrs={'placeholder': '10'}))
21 sqlquery = forms.CharField(required=False, label='SQL Query Conditions',
22 help_text='The "WHERE" criteria to restrict which objects are returned. '
23 '(i.e. gmag < 12.0)')
24
25 def __init__(self, *args, **kwargs):
26 super().__init__(*args, **kwargs)
27 self.helper.layout = Layout(
28 HTML('''
29 <p>
30 Please see the <a href="https://lasair-ztf.lsst.ac.uk/api">Lasair website</a> for more detailed
31 instructions on querying the broker.
32 '''),
33 self.common_layout,
34 Fieldset(
35 'Cone Search',
36 Row(
37 Column('cone_ra', css_class='form-group col-md-4 mb-0'),
38 Column('cone_dec', css_class='form-group col-md-4 mb-0'),
39 Column('cone_radius', css_class='form-group col-md-4 mb-0'),
40 css_class='form-row'
41 ),
42 HTML("""<br>
43 <h4>SQL Query Search</h4>
44 """),
45
46 Div('sqlquery')
47 )
48 )
49
50 def clean(self):
51 cleaned_data = super().clean()
52
53 # Ensure that either cone search or sqlquery are populated
54 if not ((cleaned_data['cone_ra'] and cleaned_data['cone_dec']) or cleaned_data['sqlquery']):
55 raise forms.ValidationError('Either RA/Dec or Freeform SQL Query must be populated.')
56
57 return cleaned_data
58
59
60 def get_lasair_object(obj):
61 """Parse lasair object table"""
62 objectid = obj['objectId']
63 jdmax = obj['candidates'][0]['mjd']
64 ra = obj['objectData']['ramean']
65 dec = obj['objectData']['decmean']
66 glon = obj['objectData']['glonmean']
67 glat = obj['objectData']['glatmean']
68 magpsf = obj['candidates'][0]['magpsf']
69 return {
70 'alert_id': objectid,
71 'timestamp': jdmax,
72 'ra': ra,
73 'dec': dec,
74 'galactic_lng': glon,
75 'galactic_lat': glat,
76 'mag': magpsf
77 }
78
79
80 class LasairBroker(GenericBroker):
81 """
82 The ``LasairBroker`` is the interface to the Lasair alert broker. For information regarding the query format for
83 Lasair, please see https://lasair-ztf.lsst.ac.uk/.
84
85 Requires a LASAIR_TOKEN in settings.py.
86 See https://lasair-ztf.lsst.ac.uk/api for details about how to acquire an authorization token.
87 """
88
89 name = 'Lasair'
90 form = LasairBrokerForm
91
92 def fetch_alerts(self, parameters):
93 token = settings.LASAIR_TOKEN
94 alerts = []
95 broker_feedback = ''
96 object_ids = ''
97
98 # Check for Cone Search
99 if 'cone_ra' in parameters and len(parameters['cone_ra'].strip()) > 0 and\
100 'cone_dec' in parameters and len(parameters['cone_dec'].strip()) > 0:
101
102 cone_query = {'ra': parameters['cone_ra'].strip(),
103 'dec': parameters['cone_dec'].strip(),
104 'radius': parameters['cone_radius'].strip(), # defaults to 10"
105 'requestType': 'all' # Return all objects within radius
106 }
107 parsed_cone_query = urlencode(cone_query)
108
109 # Query LASAIR Cone Search API
110 cone_response = requests.get(
111 LASAIR_URL + '/api/cone/?' + parsed_cone_query + f'&token={token}&format=json'
112 )
113 search_results = cone_response.json()
114 # Successful Search ~ [{'object': 'ZTF19abuaekk', 'separation': 205.06135003141878},...]
115 # Unsuccessful Search ~ {'error': 'No object found ...'}
116 try:
117 # Provide comma separated string of Object IDs matching search criteria
118 object_ids = ','.join([result['object'] for result in search_results])
119 except TypeError:
120 for key in search_results:
121 broker_feedback += f'{key}:{search_results[key]}'
122
123 # Check for SQL Condition Query
124 elif 'sqlquery' in parameters and len(parameters['sqlquery'].strip()) > 0:
125 sql_query = {'selected': 'objectId', # The only parameter we need returned is the objectId
126 'tables': 'objects', # The only table we need to search is the objects table
127 'conditions': parameters['sqlquery'].strip(),
128 'limit': '1000' # limit number of returned objects to 1000
129 }
130 parsed_sql_query = urlencode(sql_query)
131
132 # Query LASAIR SQLQuery API
133 query_response = requests.get(
134 LASAIR_URL + '/api/query/?' + parsed_sql_query + f'&token={token}&format=json'
135 )
136
137 search_results = query_response.json()
138 # Successful Search ~ [{'objectId': 'ZTF18aagzzzz'},...]
139 # Unsuccessful Search ~ []
140 try:
141 # Provide comma separated string of Object IDs matching search criteria
142 object_ids = ','.join([result['objectId'] for result in search_results])
143 except TypeError:
144 for key in search_results:
145 broker_feedback += f'{key}:{search_results[key]}'
146
147 # Supply feedback for empty results
148 if not object_ids and not broker_feedback:
149 broker_feedback += f"No objects found with conditions: {sql_query['conditions']}"
150 else:
151 return iter(alerts), broker_feedback
152
153 if object_ids:
154 # Query LASAIR object API
155 obj_response = requests.get(
156 LASAIR_URL + '/api/objects/' + f'?objectIds={object_ids}&token={token}&format=json'
157 )
158 obj_results = obj_response.json()
159 # Successful Search ~ [{'objectId': 'ZTF19abuaekk', 'objectData': {...}},...]
160
161 for obj in obj_results:
162 alerts.append(get_lasair_object(obj))
163 return iter(alerts), broker_feedback
164
165 def fetch_alert(self, alert_id):
166 url = LASAIR_URL + '/object/' + alert_id + '/json/'
167 response = requests.get(url)
168 response.raise_for_status()
169 parsed = response.json()
170 return parsed
171
172 def process_reduced_data(self, target, alert=None):
173 pass
174
175 def to_generic_alert(self, alert):
176 return GenericAlert(
177 url=LASAIR_URL + '/object/' + alert['alert_id'],
178 id=alert['alert_id'],
179 name=alert['alert_id'],
180 ra=alert['ra'],
181 dec=alert['dec'],
182 timestamp=alert['timestamp'],
183 mag=alert['mag'],
184 score=1, # dunno what this means ..?
185 )
186
187 def to_target(self, alert):
188 for c in alert['candidates']:
189 if 'candid' in c:
190 break
191 return Target.objects.create(
192 name=alert.get('objectId'),
193 type='SIDEREAL',
194 ra=alert['objectData']['ramean'],
195 dec=alert['objectData']['decmean'],
196 galactic_lng=alert['objectData']['glonmean'],
197 galactic_lat=alert['objectData']['glatmean'],
198 )
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tom_alerts/brokers/lasair.py b/tom_alerts/brokers/lasair.py
--- a/tom_alerts/brokers/lasair.py
+++ b/tom_alerts/brokers/lasair.py
@@ -90,10 +90,15 @@
form = LasairBrokerForm
def fetch_alerts(self, parameters):
- token = settings.LASAIR_TOKEN
alerts = []
broker_feedback = ''
object_ids = ''
+ try:
+ token = settings.LASAIR_TOKEN
+ except AttributeError:
+ broker_feedback += 'Requires a LASAIR_TOKEN in settings.py. See https://lasair-ztf.lsst.ac.uk/api' \
+ ' for details about how to acquire an authorization token.'
+ return iter(alerts), broker_feedback
# Check for Cone Search
if 'cone_ra' in parameters and len(parameters['cone_ra'].strip()) > 0 and\
|
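The diff above wraps the `settings.LASAIR_TOKEN` lookup in a `try/except AttributeError` and returns early with a readable message in `broker_feedback`. An equivalent guard can be written with `getattr`; the sketch below only illustrates that pattern (the helper name is hypothetical, and the merged change keeps the explicit `try/except` inside `fetch_alerts`):

```python
from django.conf import settings


def _lasair_token_or_feedback():
    """Return (token, feedback); feedback is non-empty when no token is configured."""
    token = getattr(settings, 'LASAIR_TOKEN', None)
    if token is None:
        return None, (
            'Requires a LASAIR_TOKEN in settings.py. See https://lasair-ztf.lsst.ac.uk/api '
            'for details about how to acquire an authorization token.'
        )
    return token, ''
```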
{"golden_diff": "diff --git a/tom_alerts/brokers/lasair.py b/tom_alerts/brokers/lasair.py\n--- a/tom_alerts/brokers/lasair.py\n+++ b/tom_alerts/brokers/lasair.py\n@@ -90,10 +90,15 @@\n form = LasairBrokerForm\n \n def fetch_alerts(self, parameters):\n- token = settings.LASAIR_TOKEN\n alerts = []\n broker_feedback = ''\n object_ids = ''\n+ try:\n+ token = settings.LASAIR_TOKEN\n+ except AttributeError:\n+ broker_feedback += 'Requires a LASAIR_TOKEN in settings.py. See https://lasair-ztf.lsst.ac.uk/api' \\\n+ ' for details about how to acquire an authorization token.'\n+ return iter(alerts), broker_feedback\n \n # Check for Cone Search\n if 'cone_ra' in parameters and len(parameters['cone_ra'].strip()) > 0 and\\\n", "issue": "not having a LASAIR token in Settings.py breaks\nFail more gracefully if no LASAIR token in settings.py\n", "before_files": [{"content": "import requests\nfrom urllib.parse import urlencode\n\nfrom crispy_forms.layout import HTML, Layout, Div, Fieldset, Row, Column\nfrom django import forms\nfrom django.conf import settings\n\nfrom tom_alerts.alerts import GenericQueryForm, GenericAlert, GenericBroker\nfrom tom_targets.models import Target\n\nLASAIR_URL = 'https://lasair-ztf.lsst.ac.uk'\n\n\nclass LasairBrokerForm(GenericQueryForm):\n cone_ra = forms.CharField(required=False, label='RA', help_text='Object RA (Decimal Degrees)',\n widget=forms.TextInput(attrs={'placeholder': '1.2345'}))\n cone_dec = forms.CharField(required=False, label='Dec', help_text='Object Dec (Decimal Degrees)',\n widget=forms.TextInput(attrs={'placeholder': '1.2345'}))\n cone_radius = forms.CharField(required=False, label='Radius', help_text='Search Radius (Arcsec)', initial='10',\n widget=forms.TextInput(attrs={'placeholder': '10'}))\n sqlquery = forms.CharField(required=False, label='SQL Query Conditions',\n help_text='The \"WHERE\" criteria to restrict which objects are returned. '\n '(i.e. gmag < 12.0)')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper.layout = Layout(\n HTML('''\n <p>\n Please see the <a href=\"https://lasair-ztf.lsst.ac.uk/api\">Lasair website</a> for more detailed\n instructions on querying the broker.\n '''),\n self.common_layout,\n Fieldset(\n 'Cone Search',\n Row(\n Column('cone_ra', css_class='form-group col-md-4 mb-0'),\n Column('cone_dec', css_class='form-group col-md-4 mb-0'),\n Column('cone_radius', css_class='form-group col-md-4 mb-0'),\n css_class='form-row'\n ),\n HTML(\"\"\"<br>\n <h4>SQL Query Search</h4>\n \"\"\"),\n\n Div('sqlquery')\n )\n )\n\n def clean(self):\n cleaned_data = super().clean()\n\n # Ensure that either cone search or sqlquery are populated\n if not ((cleaned_data['cone_ra'] and cleaned_data['cone_dec']) or cleaned_data['sqlquery']):\n raise forms.ValidationError('Either RA/Dec or Freeform SQL Query must be populated.')\n\n return cleaned_data\n\n\ndef get_lasair_object(obj):\n \"\"\"Parse lasair object table\"\"\"\n objectid = obj['objectId']\n jdmax = obj['candidates'][0]['mjd']\n ra = obj['objectData']['ramean']\n dec = obj['objectData']['decmean']\n glon = obj['objectData']['glonmean']\n glat = obj['objectData']['glatmean']\n magpsf = obj['candidates'][0]['magpsf']\n return {\n 'alert_id': objectid,\n 'timestamp': jdmax,\n 'ra': ra,\n 'dec': dec,\n 'galactic_lng': glon,\n 'galactic_lat': glat,\n 'mag': magpsf\n }\n\n\nclass LasairBroker(GenericBroker):\n \"\"\"\n The ``LasairBroker`` is the interface to the Lasair alert broker. 
For information regarding the query format for\n Lasair, please see https://lasair-ztf.lsst.ac.uk/.\n\n Requires a LASAIR_TOKEN in settings.py.\n See https://lasair-ztf.lsst.ac.uk/api for details about how to acquire an authorization token.\n \"\"\"\n\n name = 'Lasair'\n form = LasairBrokerForm\n\n def fetch_alerts(self, parameters):\n token = settings.LASAIR_TOKEN\n alerts = []\n broker_feedback = ''\n object_ids = ''\n\n # Check for Cone Search\n if 'cone_ra' in parameters and len(parameters['cone_ra'].strip()) > 0 and\\\n 'cone_dec' in parameters and len(parameters['cone_dec'].strip()) > 0:\n\n cone_query = {'ra': parameters['cone_ra'].strip(),\n 'dec': parameters['cone_dec'].strip(),\n 'radius': parameters['cone_radius'].strip(), # defaults to 10\"\n 'requestType': 'all' # Return all objects within radius\n }\n parsed_cone_query = urlencode(cone_query)\n\n # Query LASAIR Cone Search API\n cone_response = requests.get(\n LASAIR_URL + '/api/cone/?' + parsed_cone_query + f'&token={token}&format=json'\n )\n search_results = cone_response.json()\n # Successful Search ~ [{'object': 'ZTF19abuaekk', 'separation': 205.06135003141878},...]\n # Unsuccessful Search ~ {'error': 'No object found ...'}\n try:\n # Provide comma separated string of Object IDs matching search criteria\n object_ids = ','.join([result['object'] for result in search_results])\n except TypeError:\n for key in search_results:\n broker_feedback += f'{key}:{search_results[key]}'\n\n # Check for SQL Condition Query\n elif 'sqlquery' in parameters and len(parameters['sqlquery'].strip()) > 0:\n sql_query = {'selected': 'objectId', # The only parameter we need returned is the objectId\n 'tables': 'objects', # The only table we need to search is the objects table\n 'conditions': parameters['sqlquery'].strip(),\n 'limit': '1000' # limit number of returned objects to 1000\n }\n parsed_sql_query = urlencode(sql_query)\n\n # Query LASAIR SQLQuery API\n query_response = requests.get(\n LASAIR_URL + '/api/query/?' 
+ parsed_sql_query + f'&token={token}&format=json'\n )\n\n search_results = query_response.json()\n # Successful Search ~ [{'objectId': 'ZTF18aagzzzz'},...]\n # Unsuccessful Search ~ []\n try:\n # Provide comma separated string of Object IDs matching search criteria\n object_ids = ','.join([result['objectId'] for result in search_results])\n except TypeError:\n for key in search_results:\n broker_feedback += f'{key}:{search_results[key]}'\n\n # Supply feedback for empty results\n if not object_ids and not broker_feedback:\n broker_feedback += f\"No objects found with conditions: {sql_query['conditions']}\"\n else:\n return iter(alerts), broker_feedback\n\n if object_ids:\n # Query LASAIR object API\n obj_response = requests.get(\n LASAIR_URL + '/api/objects/' + f'?objectIds={object_ids}&token={token}&format=json'\n )\n obj_results = obj_response.json()\n # Successful Search ~ [{'objectId': 'ZTF19abuaekk', 'objectData': {...}},...]\n\n for obj in obj_results:\n alerts.append(get_lasair_object(obj))\n return iter(alerts), broker_feedback\n\n def fetch_alert(self, alert_id):\n url = LASAIR_URL + '/object/' + alert_id + '/json/'\n response = requests.get(url)\n response.raise_for_status()\n parsed = response.json()\n return parsed\n\n def process_reduced_data(self, target, alert=None):\n pass\n\n def to_generic_alert(self, alert):\n return GenericAlert(\n url=LASAIR_URL + '/object/' + alert['alert_id'],\n id=alert['alert_id'],\n name=alert['alert_id'],\n ra=alert['ra'],\n dec=alert['dec'],\n timestamp=alert['timestamp'],\n mag=alert['mag'],\n score=1, # dunno what this means ..?\n )\n\n def to_target(self, alert):\n for c in alert['candidates']:\n if 'candid' in c:\n break\n return Target.objects.create(\n name=alert.get('objectId'),\n type='SIDEREAL',\n ra=alert['objectData']['ramean'],\n dec=alert['objectData']['decmean'],\n galactic_lng=alert['objectData']['glonmean'],\n galactic_lat=alert['objectData']['glatmean'],\n )\n", "path": "tom_alerts/brokers/lasair.py"}], "after_files": [{"content": "import requests\nfrom urllib.parse import urlencode\n\nfrom crispy_forms.layout import HTML, Layout, Div, Fieldset, Row, Column\nfrom django import forms\nfrom django.conf import settings\n\nfrom tom_alerts.alerts import GenericQueryForm, GenericAlert, GenericBroker\nfrom tom_targets.models import Target\n\nLASAIR_URL = 'https://lasair-ztf.lsst.ac.uk'\n\n\nclass LasairBrokerForm(GenericQueryForm):\n cone_ra = forms.CharField(required=False, label='RA', help_text='Object RA (Decimal Degrees)',\n widget=forms.TextInput(attrs={'placeholder': '1.2345'}))\n cone_dec = forms.CharField(required=False, label='Dec', help_text='Object Dec (Decimal Degrees)',\n widget=forms.TextInput(attrs={'placeholder': '1.2345'}))\n cone_radius = forms.CharField(required=False, label='Radius', help_text='Search Radius (Arcsec)', initial='10',\n widget=forms.TextInput(attrs={'placeholder': '10'}))\n sqlquery = forms.CharField(required=False, label='SQL Query Conditions',\n help_text='The \"WHERE\" criteria to restrict which objects are returned. '\n '(i.e. 
gmag < 12.0)')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper.layout = Layout(\n HTML('''\n <p>\n Please see the <a href=\"https://lasair-ztf.lsst.ac.uk/api\">Lasair website</a> for more detailed\n instructions on querying the broker.\n '''),\n self.common_layout,\n Fieldset(\n 'Cone Search',\n Row(\n Column('cone_ra', css_class='form-group col-md-4 mb-0'),\n Column('cone_dec', css_class='form-group col-md-4 mb-0'),\n Column('cone_radius', css_class='form-group col-md-4 mb-0'),\n css_class='form-row'\n ),\n HTML(\"\"\"<br>\n <h4>SQL Query Search</h4>\n \"\"\"),\n\n Div('sqlquery')\n )\n )\n\n def clean(self):\n cleaned_data = super().clean()\n\n # Ensure that either cone search or sqlquery are populated\n if not ((cleaned_data['cone_ra'] and cleaned_data['cone_dec']) or cleaned_data['sqlquery']):\n raise forms.ValidationError('Either RA/Dec or Freeform SQL Query must be populated.')\n\n return cleaned_data\n\n\ndef get_lasair_object(obj):\n \"\"\"Parse lasair object table\"\"\"\n objectid = obj['objectId']\n jdmax = obj['candidates'][0]['mjd']\n ra = obj['objectData']['ramean']\n dec = obj['objectData']['decmean']\n glon = obj['objectData']['glonmean']\n glat = obj['objectData']['glatmean']\n magpsf = obj['candidates'][0]['magpsf']\n return {\n 'alert_id': objectid,\n 'timestamp': jdmax,\n 'ra': ra,\n 'dec': dec,\n 'galactic_lng': glon,\n 'galactic_lat': glat,\n 'mag': magpsf\n }\n\n\nclass LasairBroker(GenericBroker):\n \"\"\"\n The ``LasairBroker`` is the interface to the Lasair alert broker. For information regarding the query format for\n Lasair, please see https://lasair-ztf.lsst.ac.uk/.\n\n Requires a LASAIR_TOKEN in settings.py.\n See https://lasair-ztf.lsst.ac.uk/api for details about how to acquire an authorization token.\n \"\"\"\n\n name = 'Lasair'\n form = LasairBrokerForm\n\n def fetch_alerts(self, parameters):\n alerts = []\n broker_feedback = ''\n object_ids = ''\n try:\n token = settings.LASAIR_TOKEN\n except AttributeError:\n broker_feedback += 'Requires a LASAIR_TOKEN in settings.py. See https://lasair-ztf.lsst.ac.uk/api' \\\n ' for details about how to acquire an authorization token.'\n return iter(alerts), broker_feedback\n\n # Check for Cone Search\n if 'cone_ra' in parameters and len(parameters['cone_ra'].strip()) > 0 and\\\n 'cone_dec' in parameters and len(parameters['cone_dec'].strip()) > 0:\n\n cone_query = {'ra': parameters['cone_ra'].strip(),\n 'dec': parameters['cone_dec'].strip(),\n 'radius': parameters['cone_radius'].strip(), # defaults to 10\"\n 'requestType': 'all' # Return all objects within radius\n }\n parsed_cone_query = urlencode(cone_query)\n\n # Query LASAIR Cone Search API\n cone_response = requests.get(\n LASAIR_URL + '/api/cone/?' 
+ parsed_cone_query + f'&token={token}&format=json'\n )\n search_results = cone_response.json()\n # Successful Search ~ [{'object': 'ZTF19abuaekk', 'separation': 205.06135003141878},...]\n # Unsuccessful Search ~ {'error': 'No object found ...'}\n try:\n # Provide comma separated string of Object IDs matching search criteria\n object_ids = ','.join([result['object'] for result in search_results])\n except TypeError:\n for key in search_results:\n broker_feedback += f'{key}:{search_results[key]}'\n\n # Check for SQL Condition Query\n elif 'sqlquery' in parameters and len(parameters['sqlquery'].strip()) > 0:\n sql_query = {'selected': 'objectId', # The only parameter we need returned is the objectId\n 'tables': 'objects', # The only table we need to search is the objects table\n 'conditions': parameters['sqlquery'].strip(),\n 'limit': '1000' # limit number of returned objects to 1000\n }\n parsed_sql_query = urlencode(sql_query)\n\n # Query LASAIR SQLQuery API\n query_response = requests.get(\n LASAIR_URL + '/api/query/?' + parsed_sql_query + f'&token={token}&format=json'\n )\n\n search_results = query_response.json()\n # Successful Search ~ [{'objectId': 'ZTF18aagzzzz'},...]\n # Unsuccessful Search ~ []\n try:\n # Provide comma separated string of Object IDs matching search criteria\n object_ids = ','.join([result['objectId'] for result in search_results])\n except TypeError:\n for key in search_results:\n broker_feedback += f'{key}:{search_results[key]}'\n\n # Supply feedback for empty results\n if not object_ids and not broker_feedback:\n broker_feedback += f\"No objects found with conditions: {sql_query['conditions']}\"\n else:\n return iter(alerts), broker_feedback\n\n if object_ids:\n # Query LASAIR object API\n obj_response = requests.get(\n LASAIR_URL + '/api/objects/' + f'?objectIds={object_ids}&token={token}&format=json'\n )\n obj_results = obj_response.json()\n # Successful Search ~ [{'objectId': 'ZTF19abuaekk', 'objectData': {...}},...]\n\n for obj in obj_results:\n alerts.append(get_lasair_object(obj))\n return iter(alerts), broker_feedback\n\n def fetch_alert(self, alert_id):\n url = LASAIR_URL + '/object/' + alert_id + '/json/'\n response = requests.get(url)\n response.raise_for_status()\n parsed = response.json()\n return parsed\n\n def process_reduced_data(self, target, alert=None):\n pass\n\n def to_generic_alert(self, alert):\n return GenericAlert(\n url=LASAIR_URL + '/object/' + alert['alert_id'],\n id=alert['alert_id'],\n name=alert['alert_id'],\n ra=alert['ra'],\n dec=alert['dec'],\n timestamp=alert['timestamp'],\n mag=alert['mag'],\n score=1, # dunno what this means ..?\n )\n\n def to_target(self, alert):\n for c in alert['candidates']:\n if 'candid' in c:\n break\n return Target.objects.create(\n name=alert.get('objectId'),\n type='SIDEREAL',\n ra=alert['objectData']['ramean'],\n dec=alert['objectData']['decmean'],\n galactic_lng=alert['objectData']['glonmean'],\n galactic_lat=alert['objectData']['glatmean'],\n )\n", "path": "tom_alerts/brokers/lasair.py"}]}
| 2,573 | 211 |
gh_patches_debug_23054 | rasdani/github-patches | git_diff | scikit-hep__pyhf-862 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update PyPI keywords and classifiers in setup.py
# Description
As JAX is now a supported backend, it should additionally be added to the [list of keywords in `setup.py`](https://github.com/scikit-hep/pyhf/blob/917bd5127c1da023b279c076bb41614fbb859487/setup.py#L85). Additionally, the [classifiers](https://packaging.python.org/guides/distributing-packages-using-setuptools/#classifiers) should be updated as well to include a `Development Status`, `License`, `Intended Audience`, and `Topic`.
--- END ISSUE ---
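For reference, PyPI keywords and classifiers are plain strings passed to `setup()`. A minimal, self-contained sketch of the kind of metadata the issue asks for (the package name and version are placeholders; the classifier values mirror the ones eventually adopted in the patch further down):
```python
from setuptools import setup

setup(
    name="example-package",  # placeholder metadata, not pyhf's real values
    version="0.0.1",
    keywords="physics fitting numpy scipy tensorflow pytorch jax",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Physics",
        "Programming Language :: Python :: 3",
    ],
)
```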
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 ],
92 package_dir={'': 'src'},
93 packages=find_packages(where='src'),
94 include_package_data=True,
95 python_requires=">=3.6",
96 install_requires=[
97 'scipy', # requires numpy, which is required by pyhf and tensorflow
98 'click>=6.0', # for console scripts,
99 'tqdm', # for readxml
100 'jsonschema>=3.2.0', # for utils
101 'jsonpatch',
102 'pyyaml', # for parsing CLI equal-delimited options
103 ],
104 extras_require=extras_require,
105 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
106 dependency_links=[],
107 use_scm_version=lambda: {'local_scheme': lambda version: ''},
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -79,11 +79,21 @@
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/scikit-hep/pyhf',
+ project_urls={
+ "Documentation": "https://scikit-hep.org/pyhf/",
+ "Source": "https://github.com/scikit-hep/pyhf",
+ "Tracker": "https://github.com/scikit-hep/pyhf/issues",
+ },
author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
author_email='[email protected], [email protected], [email protected]',
license='Apache',
- keywords='physics fitting numpy scipy tensorflow pytorch',
+ keywords='physics fitting numpy scipy tensorflow pytorch jax',
classifiers=[
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: Apache Software License",
+ "Intended Audience :: Science/Research",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Physics",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -79,11 +79,21 @@\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n+ project_urls={\n+ \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n+ \"Source\": \"https://github.com/scikit-hep/pyhf\",\n+ \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n+ },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch',\n+ keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n+ \"Development Status :: 4 - Beta\",\n+ \"License :: OSI Approved :: Apache Software License\",\n+ \"Intended Audience :: Science/Research\",\n+ \"Topic :: Scientific/Engineering\",\n+ \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n", "issue": "Update PyPI keywords and classifies in setup.py\n# Description\r\n\r\nAs JAX is now a supported backend then it should additionally be added to the [list of keywords in `setup.py`](https://github.com/scikit-hep/pyhf/blob/917bd5127c1da023b279c076bb41614fbb859487/setup.py#L85). Additionally, the [classifies](https://packaging.python.org/guides/distributing-packages-using-setuptools/#classifiers) should be updated as well to include a `Development Status`, `License`, `Intended Audience`, and `Topic`.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew 
Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n project_urls={\n \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n \"Source\": \"https://github.com/scikit-hep/pyhf\",\n \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming 
Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,477 | 294 |
gh_patches_debug_2538 | rasdani/github-patches | git_diff | Parsl__parsl-328 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git
Hi
When I try to run parsl I am getting the following issue:
fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git
Is it a real issue?
I am using python3 and jupyter but run parsl in a terminal.
Issue in parsl.log
I tried to run a simple script given in the parsl documentation
```
import parsl
from parsl import *
import time
workers = ThreadPoolExecutor(max_workers=4)
dfk = DataFlowKernel(executors=[workers])
print(1)
@App('python', dfk)
def hello ():
import time
time.sleep(5)
return 'Hello World!'
print(2)
app_future = hello()
print ('Done: %s' % app_future.done())
print ('Result: %s' % app_future.result())
print ('Done: %s' % app_future.done())
```
However, in the parsl.log shows this issue
2018-06-07 21:45:37 parsl.utils:24 [ERROR] Unable to determine code state
Traceback (most recent call last):
File "/homes/vvikraman/anaconda3/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
File "/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.
--- END ISSUE ---
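The traceback comes from `get_version()` shelling out to `git` inside `site-packages`, which is not a git checkout, so `git rev-parse` exits with status 128 and the exception is logged. Independent of the fix the project settles on, a minimal defensive sketch of such a lookup (a hypothetical helper, not Parsl's actual code) looks like:
```python
import os
import shlex
import subprocess


def describe_code_state(package_dir: str, base_version: str) -> str:
    """Append '-<sha>-<dirty|clean>' when package_dir is a git checkout, else return base_version."""
    git_dir = os.path.join(package_dir, ".git")
    if not os.path.isdir(git_dir):
        # Installed from a wheel/conda package: there is no .git directory, so skip git entirely.
        return base_version
    env = {**os.environ, "GIT_WORK_TREE": package_dir, "GIT_DIR": git_dir}
    try:
        head = subprocess.check_output(
            shlex.split("git rev-parse --short HEAD"), env=env
        ).strip().decode("utf-8")
        diff = subprocess.check_output(shlex.split("git diff HEAD"), env=env)
    except (subprocess.CalledProcessError, OSError):
        # git is missing or the checkout is unusable; fall back quietly.
        return base_version
    status = "dirty" if diff else "clean"
    return "{}-{}-{}".format(base_version, head, status)
```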
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/utils.py`
Content:
```
1 import logging
2 import os
3 import shlex
4 import subprocess
5 import threading
6 import time
7 from contextlib import contextmanager
8 from functools import wraps
9
10 import parsl
11 from parsl.version import VERSION
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_version():
17 version = parsl.__version__
18 work_tree = os.path.dirname(os.path.dirname(__file__))
19 git_dir = os.path.join(work_tree, '.git')
20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
21 try:
22 cmd = shlex.split('git rev-parse --short HEAD')
23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
25 status = 'dirty' if diff else 'clean'
26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
27 except Exception as e:
28 logger.exception("Unable to determine code state")
29
30 return version
31
32
33 def get_all_checkpoints(rundir="runinfo"):
34 """Finds the checkpoints from all last runs.
35
36 Note that checkpoints are incremental, and this helper will not find
37 previous checkpoints from earlier than the most recent run. It probably
38 should be made to do so.
39
40 Kwargs:
41 - rundir(str) : Path to the runinfo directory
42
43 Returns:
44 - a list suitable for the checkpointFiles parameter of DataFlowKernel
45 constructor
46
47 """
48
49 if(not(os.path.isdir(rundir))):
50 return []
51
52 dirs = sorted(os.listdir(rundir))
53
54 checkpoints = []
55
56 for runid in dirs:
57
58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
59
60 if(os.path.isdir(checkpoint)):
61 checkpoints.append(checkpoint)
62
63 return checkpoints
64
65
66 def get_last_checkpoint(rundir="runinfo"):
67 """Finds the checkpoint from the last run, if one exists.
68
69 Note that checkpoints are incremental, and this helper will not find
70 previous checkpoints from earlier than the most recent run. It probably
71 should be made to do so.
72
73 Kwargs:
74 - rundir(str) : Path to the runinfo directory
75
76 Returns:
77 - a list suitable for checkpointFiles parameter of DataFlowKernel
78 constructor, with 0 or 1 elements
79
80 """
81
82 if(not(os.path.isdir(rundir))):
83 return []
84
85 dirs = sorted(os.listdir(rundir))
86
87 if(len(dirs) == 0):
88 return []
89
90 last_runid = dirs[-1]
91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
92
93 if(not(os.path.isdir(last_checkpoint))):
94 return []
95
96 return [last_checkpoint]
97
98
99 def timeout(seconds=None):
100 def decorator(func, *args, **kwargs):
101 @wraps(func)
102 def wrapper(*args, **kwargs):
103 t = threading.Thread(target=func, args=args, kwargs=kwargs)
104 t.start()
105 result = t.join(seconds)
106 if t.is_alive():
107 raise RuntimeError('timed out in {}'.format(func))
108 return result
109 return wrapper
110 return decorator
111
112
113 @contextmanager
114 def time_limited_open(path, mode, seconds=1):
115 @timeout(seconds)
116 def check_path(path):
117 while not os.path.exists(path):
118 time.sleep(0.1)
119 check_path(path)
120 f = open(path, mode)
121 yield f
122 f.close()
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/utils.py b/parsl/utils.py
--- a/parsl/utils.py
+++ b/parsl/utils.py
@@ -25,7 +25,7 @@
status = 'dirty' if diff else 'clean'
version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
except Exception as e:
- logger.exception("Unable to determine code state")
+ pass
return version
|
{"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -25,7 +25,7 @@\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n- logger.exception(\"Unable to determine code state\")\n+ pass\n \n return version\n", "issue": "fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git\nHi \r\nWhen I try to run parsl I am getting the following issue:\r\n\r\nfatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git\r\n\r\nIs it a real issue?\r\n\r\nI am using python3 and jupyter but run parsl in a terminal. \nIssue in parsl.log\nI tried to run a simple script given in the parsl documentation \r\n\r\n```\r\nimport parsl\r\nfrom parsl import *\r\nimport time\r\n\r\nworkers = ThreadPoolExecutor(max_workers=4)\r\ndfk = DataFlowKernel(executors=[workers])\r\nprint(1)\r\n@App('python', dfk)\r\ndef hello ():\r\n import time\r\n time.sleep(5)\r\n return 'Hello World!'\r\nprint(2)\r\napp_future = hello()\r\nprint ('Done: %s' % app_future.done())\r\nprint ('Result: %s' % app_future.result())\r\nprint ('Done: %s' % app_future.done())\r\n```\r\nHowever, in the parsl.log shows this issue\r\n\r\n2018-06-07 21:45:37 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n logger.exception(\"Unable to determine code state\")\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n @timeout(seconds)\n def check_path(path):\n while not os.path.exists(path):\n time.sleep(0.1)\n check_path(path)\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}], "after_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n @timeout(seconds)\n def check_path(path):\n while not os.path.exists(path):\n time.sleep(0.1)\n check_path(path)\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]}
| 1,730 | 102 |
gh_patches_debug_20440 | rasdani/github-patches | git_diff | conan-io__conan-10310 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feature] Tool: is_msvc helper
Our friend and great contributor SpaceIm is using a new detection which I believe could be part of the mainstream:
```python
@property
def _is_msvc(self):
return str(self.settings.compiler) in ["Visual Studio", "msvc"]
```
This property can be widely re-used, for example when checking conditions in `validate()`.
- [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
--- END ISSUE ---
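For illustration, a hypothetical recipe consuming such a helper from `validate()` (the recipe name and the constraint are invented; the import path follows the patch further down):
```python
from conans import ConanFile
from conans.errors import ConanInvalidConfiguration
from conan.tools.microsoft import is_msvc  # helper proposed in this issue


class ExampleConan(ConanFile):
    name = "example"  # hypothetical recipe, for illustration only
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"

    def validate(self):
        # Replaces the per-recipe `_is_msvc` property shown above.
        if is_msvc(self):
            raise ConanInvalidConfiguration("example/0.1 cannot be built with MSVC")
```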
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conan/tools/microsoft/__init__.py`
Content:
```
1 from conan.tools.microsoft.toolchain import MSBuildToolchain
2 from conan.tools.microsoft.msbuild import MSBuild
3 from conan.tools.microsoft.msbuilddeps import MSBuildDeps
4 from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars
5 from conan.tools.microsoft.subsystems import subsystem_path
6
```
Path: `conan/tools/microsoft/visual.py`
Content:
```
1 import os
2 import textwrap
3
4 from conans.client.tools import vs_installation_path
5 from conans.errors import ConanException
6
7 CONAN_VCVARS_FILE = "conanvcvars.bat"
8
9
10 def msvc_version_to_vs_ide_version(version):
11 _visuals = {'190': '14',
12 '191': '15',
13 '192': '16',
14 '193': '17'}
15 return _visuals[str(version)]
16
17
18 class VCVars:
19 def __init__(self, conanfile):
20 self._conanfile = conanfile
21
22 def generate(self, scope="build"):
23 """
24 write a conanvcvars.bat file with the good args from settings
25 """
26 conanfile = self._conanfile
27 os_ = conanfile.settings.get_safe("os")
28 if os_ != "Windows":
29 return
30
31 compiler = conanfile.settings.get_safe("compiler")
32 if compiler != "Visual Studio" and compiler != "msvc":
33 return
34
35 vs_version = vs_ide_version(conanfile)
36 vcvarsarch = vcvars_arch(conanfile)
37 vcvars_ver = _vcvars_vers(conanfile, compiler, vs_version)
38
39 vs_install_path = conanfile.conf["tools.microsoft.msbuild:installation_path"]
40 # The vs_install_path is like
41 # C:\Program Files (x86)\Microsoft Visual Studio\2019\Community
42 # C:\Program Files (x86)\Microsoft Visual Studio\2017\Community
43 # C:\Program Files (x86)\Microsoft Visual Studio 14.0
44 vcvars = vcvars_command(vs_version, architecture=vcvarsarch, platform_type=None,
45 winsdk_version=None, vcvars_ver=vcvars_ver,
46 vs_install_path=vs_install_path)
47
48 content = textwrap.dedent("""\
49 @echo off
50 {}
51 """.format(vcvars))
52 from conan.tools.env.environment import create_env_script
53 create_env_script(conanfile, content, CONAN_VCVARS_FILE, scope)
54
55
56 def vs_ide_version(conanfile):
57 compiler = conanfile.settings.get_safe("compiler")
58 compiler_version = (conanfile.settings.get_safe("compiler.base.version") or
59 conanfile.settings.get_safe("compiler.version"))
60 if compiler == "msvc":
61 toolset_override = conanfile.conf["tools.microsoft.msbuild:vs_version"]
62 if toolset_override:
63 visual_version = toolset_override
64 else:
65 visual_version = msvc_version_to_vs_ide_version(compiler_version)
66 else:
67 visual_version = compiler_version
68 return visual_version
69
70
71 def msvc_runtime_flag(conanfile):
72 settings = conanfile.settings
73 compiler = settings.get_safe("compiler")
74 runtime = settings.get_safe("compiler.runtime")
75 if compiler == "Visual Studio":
76 return runtime
77 if compiler == "msvc" or compiler == "intel-cc":
78 runtime_type = settings.get_safe("compiler.runtime_type")
79 runtime = "MT" if runtime == "static" else "MD"
80 if runtime_type == "Debug":
81 runtime = "{}d".format(runtime)
82 return runtime
83
84
85 def vcvars_command(version, architecture=None, platform_type=None, winsdk_version=None,
86 vcvars_ver=None, start_dir_cd=True, vs_install_path=None):
87 """ conan-agnostic construction of vcvars command
88 https://docs.microsoft.com/en-us/cpp/build/building-on-the-command-line
89 """
90 # TODO: This comes from conans/client/tools/win.py vcvars_command()
91 cmd = []
92 if start_dir_cd:
93 cmd.append('set "VSCMD_START_DIR=%CD%" &&')
94
95 # The "call" is useful in case it is called from another .bat script
96 cmd.append('call "%s" ' % _vcvars_path(version, vs_install_path))
97 if architecture:
98 cmd.append(architecture)
99 if platform_type:
100 cmd.append(platform_type)
101 if winsdk_version:
102 cmd.append(winsdk_version)
103 if vcvars_ver:
104 cmd.append("-vcvars_ver=%s" % vcvars_ver)
105 return " ".join(cmd)
106
107
108 def _vcvars_path(version, vs_install_path):
109 # TODO: This comes from conans/client/tools/win.py vcvars_command()
110 vs_path = vs_install_path or vs_installation_path(version)
111 if not vs_path or not os.path.isdir(vs_path):
112 raise ConanException("VS non-existing installation: Visual Studio %s" % version)
113
114 if int(version) > 14:
115 vcpath = os.path.join(vs_path, "VC/Auxiliary/Build/vcvarsall.bat")
116 else:
117 vcpath = os.path.join(vs_path, "VC/vcvarsall.bat")
118 return vcpath
119
120
121 def vcvars_arch(conanfile):
122 """
123 computes the vcvars command line architecture based on conanfile settings (host) and
124 settings_build
125 :param conanfile:
126 :return:
127 """
128 # TODO: This comes from conans/client/tools/win.py vcvars_command()
129 settings_host = conanfile.settings
130 try:
131 settings_build = conanfile.settings_build
132 except AttributeError:
133 settings_build = settings_host
134
135 arch_host = str(settings_host.arch)
136 arch_build = str(settings_build.arch)
137
138 arch = None
139 if arch_build == 'x86_64':
140 arch = {'x86': "amd64_x86",
141 'x86_64': 'amd64',
142 'armv7': 'amd64_arm',
143 'armv8': 'amd64_arm64'}.get(arch_host)
144 elif arch_build == 'x86':
145 arch = {'x86': 'x86',
146 'x86_64': 'x86_amd64',
147 'armv7': 'x86_arm',
148 'armv8': 'x86_arm64'}.get(arch_host)
149
150 if not arch:
151 raise ConanException('vcvars unsupported architectures %s-%s' % (arch_build, arch_host))
152
153 return arch
154
155
156 def _vcvars_vers(conanfile, compiler, vs_version):
157 if int(vs_version) <= 14:
158 return None
159
160 vcvars_ver = None
161 if compiler == "Visual Studio":
162 toolset = conanfile.settings.get_safe("compiler.toolset")
163 if toolset is not None:
164 vcvars_ver = {"v140": "14.0",
165 "v141": "14.1",
166 "v142": "14.2",
167 "v143": "14.3"}.get(toolset)
168 else:
169 assert compiler == "msvc"
170 # Code similar to CMakeToolchain toolset one
171 compiler_version = str(conanfile.settings.compiler.version)
172 # The equivalent of compiler 192 is toolset 14.2
173 vcvars_ver = "14.{}".format(compiler_version[-1])
174 return vcvars_ver
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conan/tools/microsoft/__init__.py b/conan/tools/microsoft/__init__.py
--- a/conan/tools/microsoft/__init__.py
+++ b/conan/tools/microsoft/__init__.py
@@ -1,5 +1,5 @@
from conan.tools.microsoft.toolchain import MSBuildToolchain
from conan.tools.microsoft.msbuild import MSBuild
from conan.tools.microsoft.msbuilddeps import MSBuildDeps
-from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars
+from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc
from conan.tools.microsoft.subsystems import subsystem_path
diff --git a/conan/tools/microsoft/visual.py b/conan/tools/microsoft/visual.py
--- a/conan/tools/microsoft/visual.py
+++ b/conan/tools/microsoft/visual.py
@@ -172,3 +172,12 @@
# The equivalent of compiler 192 is toolset 14.2
vcvars_ver = "14.{}".format(compiler_version[-1])
return vcvars_ver
+
+
+def is_msvc(conanfile):
+ """ Validate if current compiler in setttings is 'Visual Studio' or 'msvc'
+ :param conanfile: ConanFile instance
+ :return: True, if the host compiler is related to Visual Studio, otherwise, False.
+ """
+ settings = conanfile.settings
+ return settings.get_safe("compiler") in ["Visual Studio", "msvc"]
|
{"golden_diff": "diff --git a/conan/tools/microsoft/__init__.py b/conan/tools/microsoft/__init__.py\n--- a/conan/tools/microsoft/__init__.py\n+++ b/conan/tools/microsoft/__init__.py\n@@ -1,5 +1,5 @@\n from conan.tools.microsoft.toolchain import MSBuildToolchain\n from conan.tools.microsoft.msbuild import MSBuild\n from conan.tools.microsoft.msbuilddeps import MSBuildDeps\n-from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars\n+from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc\n from conan.tools.microsoft.subsystems import subsystem_path\ndiff --git a/conan/tools/microsoft/visual.py b/conan/tools/microsoft/visual.py\n--- a/conan/tools/microsoft/visual.py\n+++ b/conan/tools/microsoft/visual.py\n@@ -172,3 +172,12 @@\n # The equivalent of compiler 192 is toolset 14.2\n vcvars_ver = \"14.{}\".format(compiler_version[-1])\n return vcvars_ver\n+\n+\n+def is_msvc(conanfile):\n+ \"\"\" Validate if current compiler in setttings is 'Visual Studio' or 'msvc'\n+ :param conanfile: ConanFile instance\n+ :return: True, if the host compiler is related to Visual Studio, otherwise, False.\n+ \"\"\"\n+ settings = conanfile.settings\n+ return settings.get_safe(\"compiler\") in [\"Visual Studio\", \"msvc\"]\n", "issue": "[feature] Tool: is_msvc helper\nOur friend and great contributor SpaceIm is using a new detection which I believe could be part of the mainstream:\r\n\r\n```python\r\n@property\r\ndef _is_msvc(self):\r\n return str(self.settings.compiler) in [\"Visual Studio\", \"msvc\"]\r\n```\r\n\r\nThis property can be largely re-used, when checking on `validate()` or any other condition.\r\n\r\n\r\n- [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n\n", "before_files": [{"content": "from conan.tools.microsoft.toolchain import MSBuildToolchain\nfrom conan.tools.microsoft.msbuild import MSBuild\nfrom conan.tools.microsoft.msbuilddeps import MSBuildDeps\nfrom conan.tools.microsoft.visual import msvc_runtime_flag, VCVars\nfrom conan.tools.microsoft.subsystems import subsystem_path\n", "path": "conan/tools/microsoft/__init__.py"}, {"content": "import os\nimport textwrap\n\nfrom conans.client.tools import vs_installation_path\nfrom conans.errors import ConanException\n\nCONAN_VCVARS_FILE = \"conanvcvars.bat\"\n\n\ndef msvc_version_to_vs_ide_version(version):\n _visuals = {'190': '14',\n '191': '15',\n '192': '16',\n '193': '17'}\n return _visuals[str(version)]\n\n\nclass VCVars:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n\n def generate(self, scope=\"build\"):\n \"\"\"\n write a conanvcvars.bat file with the good args from settings\n \"\"\"\n conanfile = self._conanfile\n os_ = conanfile.settings.get_safe(\"os\")\n if os_ != \"Windows\":\n return\n\n compiler = conanfile.settings.get_safe(\"compiler\")\n if compiler != \"Visual Studio\" and compiler != \"msvc\":\n return\n\n vs_version = vs_ide_version(conanfile)\n vcvarsarch = vcvars_arch(conanfile)\n vcvars_ver = _vcvars_vers(conanfile, compiler, vs_version)\n\n vs_install_path = conanfile.conf[\"tools.microsoft.msbuild:installation_path\"]\n # The vs_install_path is like\n # C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\n # C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\n # C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\n vcvars = vcvars_command(vs_version, architecture=vcvarsarch, platform_type=None,\n winsdk_version=None, vcvars_ver=vcvars_ver,\n vs_install_path=vs_install_path)\n\n 
content = textwrap.dedent(\"\"\"\\\n @echo off\n {}\n \"\"\".format(vcvars))\n from conan.tools.env.environment import create_env_script\n create_env_script(conanfile, content, CONAN_VCVARS_FILE, scope)\n\n\ndef vs_ide_version(conanfile):\n compiler = conanfile.settings.get_safe(\"compiler\")\n compiler_version = (conanfile.settings.get_safe(\"compiler.base.version\") or\n conanfile.settings.get_safe(\"compiler.version\"))\n if compiler == \"msvc\":\n toolset_override = conanfile.conf[\"tools.microsoft.msbuild:vs_version\"]\n if toolset_override:\n visual_version = toolset_override\n else:\n visual_version = msvc_version_to_vs_ide_version(compiler_version)\n else:\n visual_version = compiler_version\n return visual_version\n\n\ndef msvc_runtime_flag(conanfile):\n settings = conanfile.settings\n compiler = settings.get_safe(\"compiler\")\n runtime = settings.get_safe(\"compiler.runtime\")\n if compiler == \"Visual Studio\":\n return runtime\n if compiler == \"msvc\" or compiler == \"intel-cc\":\n runtime_type = settings.get_safe(\"compiler.runtime_type\")\n runtime = \"MT\" if runtime == \"static\" else \"MD\"\n if runtime_type == \"Debug\":\n runtime = \"{}d\".format(runtime)\n return runtime\n\n\ndef vcvars_command(version, architecture=None, platform_type=None, winsdk_version=None,\n vcvars_ver=None, start_dir_cd=True, vs_install_path=None):\n \"\"\" conan-agnostic construction of vcvars command\n https://docs.microsoft.com/en-us/cpp/build/building-on-the-command-line\n \"\"\"\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n cmd = []\n if start_dir_cd:\n cmd.append('set \"VSCMD_START_DIR=%CD%\" &&')\n\n # The \"call\" is useful in case it is called from another .bat script\n cmd.append('call \"%s\" ' % _vcvars_path(version, vs_install_path))\n if architecture:\n cmd.append(architecture)\n if platform_type:\n cmd.append(platform_type)\n if winsdk_version:\n cmd.append(winsdk_version)\n if vcvars_ver:\n cmd.append(\"-vcvars_ver=%s\" % vcvars_ver)\n return \" \".join(cmd)\n\n\ndef _vcvars_path(version, vs_install_path):\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n vs_path = vs_install_path or vs_installation_path(version)\n if not vs_path or not os.path.isdir(vs_path):\n raise ConanException(\"VS non-existing installation: Visual Studio %s\" % version)\n\n if int(version) > 14:\n vcpath = os.path.join(vs_path, \"VC/Auxiliary/Build/vcvarsall.bat\")\n else:\n vcpath = os.path.join(vs_path, \"VC/vcvarsall.bat\")\n return vcpath\n\n\ndef vcvars_arch(conanfile):\n \"\"\"\n computes the vcvars command line architecture based on conanfile settings (host) and\n settings_build\n :param conanfile:\n :return:\n \"\"\"\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n settings_host = conanfile.settings\n try:\n settings_build = conanfile.settings_build\n except AttributeError:\n settings_build = settings_host\n\n arch_host = str(settings_host.arch)\n arch_build = str(settings_build.arch)\n\n arch = None\n if arch_build == 'x86_64':\n arch = {'x86': \"amd64_x86\",\n 'x86_64': 'amd64',\n 'armv7': 'amd64_arm',\n 'armv8': 'amd64_arm64'}.get(arch_host)\n elif arch_build == 'x86':\n arch = {'x86': 'x86',\n 'x86_64': 'x86_amd64',\n 'armv7': 'x86_arm',\n 'armv8': 'x86_arm64'}.get(arch_host)\n\n if not arch:\n raise ConanException('vcvars unsupported architectures %s-%s' % (arch_build, arch_host))\n\n return arch\n\n\ndef _vcvars_vers(conanfile, compiler, vs_version):\n if int(vs_version) <= 14:\n return None\n\n vcvars_ver = None\n if 
compiler == \"Visual Studio\":\n toolset = conanfile.settings.get_safe(\"compiler.toolset\")\n if toolset is not None:\n vcvars_ver = {\"v140\": \"14.0\",\n \"v141\": \"14.1\",\n \"v142\": \"14.2\",\n \"v143\": \"14.3\"}.get(toolset)\n else:\n assert compiler == \"msvc\"\n # Code similar to CMakeToolchain toolset one\n compiler_version = str(conanfile.settings.compiler.version)\n # The equivalent of compiler 192 is toolset 14.2\n vcvars_ver = \"14.{}\".format(compiler_version[-1])\n return vcvars_ver\n", "path": "conan/tools/microsoft/visual.py"}], "after_files": [{"content": "from conan.tools.microsoft.toolchain import MSBuildToolchain\nfrom conan.tools.microsoft.msbuild import MSBuild\nfrom conan.tools.microsoft.msbuilddeps import MSBuildDeps\nfrom conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc\nfrom conan.tools.microsoft.subsystems import subsystem_path\n", "path": "conan/tools/microsoft/__init__.py"}, {"content": "import os\nimport textwrap\n\nfrom conans.client.tools import vs_installation_path\nfrom conans.errors import ConanException\n\nCONAN_VCVARS_FILE = \"conanvcvars.bat\"\n\n\ndef msvc_version_to_vs_ide_version(version):\n _visuals = {'190': '14',\n '191': '15',\n '192': '16',\n '193': '17'}\n return _visuals[str(version)]\n\n\nclass VCVars:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n\n def generate(self, scope=\"build\"):\n \"\"\"\n write a conanvcvars.bat file with the good args from settings\n \"\"\"\n conanfile = self._conanfile\n os_ = conanfile.settings.get_safe(\"os\")\n if os_ != \"Windows\":\n return\n\n compiler = conanfile.settings.get_safe(\"compiler\")\n if compiler != \"Visual Studio\" and compiler != \"msvc\":\n return\n\n vs_version = vs_ide_version(conanfile)\n vcvarsarch = vcvars_arch(conanfile)\n vcvars_ver = _vcvars_vers(conanfile, compiler, vs_version)\n\n vs_install_path = conanfile.conf[\"tools.microsoft.msbuild:installation_path\"]\n # The vs_install_path is like\n # C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\n # C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\n # C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\n vcvars = vcvars_command(vs_version, architecture=vcvarsarch, platform_type=None,\n winsdk_version=None, vcvars_ver=vcvars_ver,\n vs_install_path=vs_install_path)\n\n content = textwrap.dedent(\"\"\"\\\n @echo off\n {}\n \"\"\".format(vcvars))\n from conan.tools.env.environment import create_env_script\n create_env_script(conanfile, content, CONAN_VCVARS_FILE, scope)\n\n\ndef vs_ide_version(conanfile):\n compiler = conanfile.settings.get_safe(\"compiler\")\n compiler_version = (conanfile.settings.get_safe(\"compiler.base.version\") or\n conanfile.settings.get_safe(\"compiler.version\"))\n if compiler == \"msvc\":\n toolset_override = conanfile.conf[\"tools.microsoft.msbuild:vs_version\"]\n if toolset_override:\n visual_version = toolset_override\n else:\n visual_version = msvc_version_to_vs_ide_version(compiler_version)\n else:\n visual_version = compiler_version\n return visual_version\n\n\ndef msvc_runtime_flag(conanfile):\n settings = conanfile.settings\n compiler = settings.get_safe(\"compiler\")\n runtime = settings.get_safe(\"compiler.runtime\")\n if compiler == \"Visual Studio\":\n return runtime\n if compiler == \"msvc\" or compiler == \"intel-cc\":\n runtime_type = settings.get_safe(\"compiler.runtime_type\")\n runtime = \"MT\" if runtime == \"static\" else \"MD\"\n if runtime_type == \"Debug\":\n runtime = \"{}d\".format(runtime)\n return 
runtime\n\n\ndef vcvars_command(version, architecture=None, platform_type=None, winsdk_version=None,\n vcvars_ver=None, start_dir_cd=True, vs_install_path=None):\n \"\"\" conan-agnostic construction of vcvars command\n https://docs.microsoft.com/en-us/cpp/build/building-on-the-command-line\n \"\"\"\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n cmd = []\n if start_dir_cd:\n cmd.append('set \"VSCMD_START_DIR=%CD%\" &&')\n\n # The \"call\" is useful in case it is called from another .bat script\n cmd.append('call \"%s\" ' % _vcvars_path(version, vs_install_path))\n if architecture:\n cmd.append(architecture)\n if platform_type:\n cmd.append(platform_type)\n if winsdk_version:\n cmd.append(winsdk_version)\n if vcvars_ver:\n cmd.append(\"-vcvars_ver=%s\" % vcvars_ver)\n return \" \".join(cmd)\n\n\ndef _vcvars_path(version, vs_install_path):\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n vs_path = vs_install_path or vs_installation_path(version)\n if not vs_path or not os.path.isdir(vs_path):\n raise ConanException(\"VS non-existing installation: Visual Studio %s\" % version)\n\n if int(version) > 14:\n vcpath = os.path.join(vs_path, \"VC/Auxiliary/Build/vcvarsall.bat\")\n else:\n vcpath = os.path.join(vs_path, \"VC/vcvarsall.bat\")\n return vcpath\n\n\ndef vcvars_arch(conanfile):\n \"\"\"\n computes the vcvars command line architecture based on conanfile settings (host) and\n settings_build\n :param conanfile:\n :return:\n \"\"\"\n # TODO: This comes from conans/client/tools/win.py vcvars_command()\n settings_host = conanfile.settings\n try:\n settings_build = conanfile.settings_build\n except AttributeError:\n settings_build = settings_host\n\n arch_host = str(settings_host.arch)\n arch_build = str(settings_build.arch)\n\n arch = None\n if arch_build == 'x86_64':\n arch = {'x86': \"amd64_x86\",\n 'x86_64': 'amd64',\n 'armv7': 'amd64_arm',\n 'armv8': 'amd64_arm64'}.get(arch_host)\n elif arch_build == 'x86':\n arch = {'x86': 'x86',\n 'x86_64': 'x86_amd64',\n 'armv7': 'x86_arm',\n 'armv8': 'x86_arm64'}.get(arch_host)\n\n if not arch:\n raise ConanException('vcvars unsupported architectures %s-%s' % (arch_build, arch_host))\n\n return arch\n\n\ndef _vcvars_vers(conanfile, compiler, vs_version):\n if int(vs_version) <= 14:\n return None\n\n vcvars_ver = None\n if compiler == \"Visual Studio\":\n toolset = conanfile.settings.get_safe(\"compiler.toolset\")\n if toolset is not None:\n vcvars_ver = {\"v140\": \"14.0\",\n \"v141\": \"14.1\",\n \"v142\": \"14.2\",\n \"v143\": \"14.3\"}.get(toolset)\n else:\n assert compiler == \"msvc\"\n # Code similar to CMakeToolchain toolset one\n compiler_version = str(conanfile.settings.compiler.version)\n # The equivalent of compiler 192 is toolset 14.2\n vcvars_ver = \"14.{}\".format(compiler_version[-1])\n return vcvars_ver\n\n\ndef is_msvc(conanfile):\n \"\"\" Validate if current compiler in setttings is 'Visual Studio' or 'msvc'\n :param conanfile: ConanFile instance\n :return: True, if the host compiler is related to Visual Studio, otherwise, False.\n \"\"\"\n settings = conanfile.settings\n return settings.get_safe(\"compiler\") in [\"Visual Studio\", \"msvc\"]\n", "path": "conan/tools/microsoft/visual.py"}]}
| 2,458 | 339 |
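A minimal standalone sketch of the msvc-version-to-toolset rule shown in the `_vcvars_vers` snippet above: the last digit of the msvc compiler version (190 through 193) becomes the minor digit of the `vcvars_ver` toolset string. The function name below is invented for illustration and is not part of Conan's API.

```python
# Sketch only: reproduces the "14.{last digit}" rule from _vcvars_vers above,
# e.g. msvc compiler version "192" corresponds to vcvars_ver "14.2".
def toolset_from_msvc_version(compiler_version: str) -> str:
    # The final digit of the msvc version selects the minor toolset number.
    return "14.{}".format(compiler_version[-1])


if __name__ == "__main__":
    for version in ("190", "191", "192", "193"):
        print(version, "->", toolset_from_msvc_version(version))
```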
gh_patches_debug_28236 | rasdani/github-patches | git_diff | talonhub__community-763 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do we need _capitalize_defaults now that Talon lexicon includes capitalization?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/vocabulary.py`
Content:
```
1 import logging
2 from typing import Dict, Sequence
3
4 from talon import Context, Module, actions
5 from .user_settings import get_list_from_csv
6
7 mod = Module()
8 ctx = Context()
9
10 mod.list("vocabulary", desc="additional vocabulary words")
11
12
13 # Default words that will need to be capitalized (particularly under w2l).
14 # NB. These defaults and those later in this file are ONLY used when
15 # auto-creating the corresponding settings/*.csv files. Those csv files
16 # determine the contents of user.vocabulary and dictate.word_map. Once they
17 # exist, the contents of the lists/dictionaries below are irrelevant.
18 _capitalize_defaults = [
19 "I",
20 "I'm",
21 "I've",
22 "I'll",
23 "I'd",
24 "Monday",
25 "Mondays",
26 "Tuesday",
27 "Tuesdays",
28 "Wednesday",
29 "Wednesdays",
30 "Thursday",
31 "Thursdays",
32 "Friday",
33 "Fridays",
34 "Saturday",
35 "Saturdays",
36 "Sunday",
37 "Sundays",
38 "January",
39 "February",
40 # March omitted because it's a regular word too
41 "April",
42 # May omitted because it's a regular word too
43 "June",
44 "July",
45 "August",
46 "September",
47 "October",
48 "November",
49 "December",
50 ]
51
52 # Default words that need to be remapped.
53 _word_map_defaults = {
54 # E.g:
55 # "cash": "cache",
56 # This is the opposite ordering to words_to_replace.csv (the latter has the target word first)
57 }
58 _word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})
59
60
61 # phrases_to_replace is a spoken form -> written form map, used by
62 # `user.replace_phrases` to rewrite words and phrases Talon recognized.
63 # This does not change the priority with which Talon recognizes
64 # particular phrases over others.
65 phrases_to_replace = get_list_from_csv(
66 "words_to_replace.csv",
67 headers=("Replacement", "Original"),
68 default=_word_map_defaults
69 )
70
71 # "dictate.word_map" is used by `actions.dictate.replace_words`;
72 # a built-in Talon action similar to `replace_phrases`, but supporting
73 # only single-word replacements. Multi-word phrases are ignored.
74 ctx.settings["dictate.word_map"] = phrases_to_replace
75
76
77 # Default words that should be added to Talon's vocabulary.
78 # Don't edit this. Edit 'additional_vocabulary.csv' instead
79 _simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft"]
80
81 # Defaults for different pronounciations of words that need to be added to
82 # Talon's vocabulary.
83 _default_vocabulary = {
84 "N map": "nmap",
85 "under documented": "under-documented",
86 }
87 _default_vocabulary.update({word: word for word in _simple_vocab_default})
88
89 # "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't
90 # recognize. Words in user.vocabulary (or other lists and captures) are
91 # "command-like" and their recognition is prioritized over ordinary words.
92 ctx.lists["user.vocabulary"] = get_list_from_csv(
93 "additional_words.csv",
94 headers=("Word(s)", "Spoken Form (If Different)"),
95 default=_default_vocabulary,
96 )
97
98 # for quick verification of the reload
99 # print(str(ctx.settings["dictate.word_map"]))
100 # print(str(ctx.lists["user.vocabulary"]))
101
102 class PhraseReplacer:
103 """Utility for replacing phrases by other phrases inside text or word lists.
104
105 Replacing longer phrases has priority.
106
107 Args:
108 - phrase_dict: dictionary mapping recognized/spoken forms to written forms
109 """
110
111 def __init__(self, phrase_dict: Dict[str, str]):
112 # Index phrases by first word, then number of subsequent words n_next
113 phrase_index = dict()
114 for spoken_form, written_form in phrase_dict.items():
115 words = spoken_form.split()
116 if not words:
117 logging.warning("Found empty spoken form for written form"
118 f"{written_form}, ignored")
119 continue
120 first_word, n_next = words[0], len(words) - 1
121 phrase_index.setdefault(first_word, {}) \
122 .setdefault(n_next, {})[tuple(words[1:])] = written_form
123
124 # Sort n_next index so longer phrases have priority
125 self.phrase_index = {
126 first_word: list(sorted(same_first_word.items(), key=lambda x: -x[0]))
127 for first_word, same_first_word in phrase_index.items()
128 }
129
130 def replace(self, input_words: Sequence[str]) -> Sequence[str]:
131 input_words = tuple(input_words) # tuple to ensure hashability of slices
132 output_words = []
133 first_word_i = 0
134 while first_word_i < len(input_words):
135 first_word = input_words[first_word_i]
136 next_word_i = first_word_i + 1
137 # Could this word be the first of a phrase we should replace?
138 for n_next, phrases_n_next in self.phrase_index.get(first_word, []):
139 # Yes. Perhaps a phrase with n_next subsequent words?
140 continuation = input_words[next_word_i : next_word_i + n_next]
141 if continuation in phrases_n_next:
142 # Found a match!
143 output_words.append(phrases_n_next[continuation])
144 first_word_i += 1 + n_next
145 break
146 else:
147 # No match, just add the word to the result
148 output_words.append(first_word)
149 first_word_i += 1
150 return output_words
151
152 # Wrapper used for testing.
153 def replace_string(self, text: str) -> str:
154 return ' '.join(self.replace(text.split()))
155
156 # Unit tests for PhraseReplacer
157 rep = PhraseReplacer({
158 'this': 'foo',
159 'that': 'bar',
160 'this is': 'stopping early',
161 'this is a test': 'it worked!',
162 })
163 assert rep.replace_string('gnork') == 'gnork'
164 assert rep.replace_string('this') == 'foo'
165 assert rep.replace_string('this that this') == 'foo bar foo'
166 assert rep.replace_string('this is a test') == 'it worked!'
167 assert rep.replace_string('well this is a test really') == 'well it worked! really'
168 assert rep.replace_string('try this is too') == 'try stopping early too'
169 assert rep.replace_string('this is a tricky one') == 'stopping early a tricky one'
170
171 phrase_replacer = PhraseReplacer(phrases_to_replace)
172
173 @mod.action_class
174 class Actions:
175 def replace_phrases(words: Sequence[str]) -> Sequence[str]:
176 """Replace phrases according to words_to_replace.csv"""
177 try:
178 return phrase_replacer.replace(words)
179 except:
180 # fall back to dictate.replace_words for error-robustness
181 logging.error("phrase replacer failed!")
182 return actions.dictate.replace_words(words)
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/code/vocabulary.py b/code/vocabulary.py
--- a/code/vocabulary.py
+++ b/code/vocabulary.py
@@ -10,31 +10,15 @@
mod.list("vocabulary", desc="additional vocabulary words")
-# Default words that will need to be capitalized (particularly under w2l).
-# NB. These defaults and those later in this file are ONLY used when
+# Default words that will need to be capitalized.
+# DON'T EDIT THIS. Edit settings/words_to_replace.csv instead.
+# These defaults and those later in this file are ONLY used when
# auto-creating the corresponding settings/*.csv files. Those csv files
# determine the contents of user.vocabulary and dictate.word_map. Once they
# exist, the contents of the lists/dictionaries below are irrelevant.
_capitalize_defaults = [
- "I",
- "I'm",
- "I've",
- "I'll",
- "I'd",
- "Monday",
- "Mondays",
- "Tuesday",
- "Tuesdays",
- "Wednesday",
- "Wednesdays",
- "Thursday",
- "Thursdays",
- "Friday",
- "Fridays",
- "Saturday",
- "Saturdays",
- "Sunday",
- "Sundays",
+ # NB. the lexicon now capitalizes January/February by default, but not the
+ # others below. Not sure why.
"January",
"February",
# March omitted because it's a regular word too
@@ -42,7 +26,7 @@
# May omitted because it's a regular word too
"June",
"July",
- "August",
+ "August", # technically also an adjective but the month is far more common
"September",
"October",
"November",
|
{"golden_diff": "diff --git a/code/vocabulary.py b/code/vocabulary.py\n--- a/code/vocabulary.py\n+++ b/code/vocabulary.py\n@@ -10,31 +10,15 @@\n mod.list(\"vocabulary\", desc=\"additional vocabulary words\")\n \n \n-# Default words that will need to be capitalized (particularly under w2l).\n-# NB. These defaults and those later in this file are ONLY used when\n+# Default words that will need to be capitalized.\n+# DON'T EDIT THIS. Edit settings/words_to_replace.csv instead.\n+# These defaults and those later in this file are ONLY used when\n # auto-creating the corresponding settings/*.csv files. Those csv files\n # determine the contents of user.vocabulary and dictate.word_map. Once they\n # exist, the contents of the lists/dictionaries below are irrelevant.\n _capitalize_defaults = [\n- \"I\",\n- \"I'm\",\n- \"I've\",\n- \"I'll\",\n- \"I'd\",\n- \"Monday\",\n- \"Mondays\",\n- \"Tuesday\",\n- \"Tuesdays\",\n- \"Wednesday\",\n- \"Wednesdays\",\n- \"Thursday\",\n- \"Thursdays\",\n- \"Friday\",\n- \"Fridays\",\n- \"Saturday\",\n- \"Saturdays\",\n- \"Sunday\",\n- \"Sundays\",\n+ # NB. the lexicon now capitalizes January/February by default, but not the\n+ # others below. Not sure why.\n \"January\",\n \"February\",\n # March omitted because it's a regular word too\n@@ -42,7 +26,7 @@\n # May omitted because it's a regular word too\n \"June\",\n \"July\",\n- \"August\",\n+ \"August\", # technically also an adjective but the month is far more common\n \"September\",\n \"October\",\n \"November\",\n", "issue": "Do we need _capitalize_defaults now that Talon lexicon includes capitalization?\n\n", "before_files": [{"content": "import logging\nfrom typing import Dict, Sequence\n\nfrom talon import Context, Module, actions\nfrom .user_settings import get_list_from_csv\n\nmod = Module()\nctx = Context()\n\nmod.list(\"vocabulary\", desc=\"additional vocabulary words\")\n\n\n# Default words that will need to be capitalized (particularly under w2l).\n# NB. These defaults and those later in this file are ONLY used when\n# auto-creating the corresponding settings/*.csv files. Those csv files\n# determine the contents of user.vocabulary and dictate.word_map. 
Once they\n# exist, the contents of the lists/dictionaries below are irrelevant.\n_capitalize_defaults = [\n \"I\",\n \"I'm\",\n \"I've\",\n \"I'll\",\n \"I'd\",\n \"Monday\",\n \"Mondays\",\n \"Tuesday\",\n \"Tuesdays\",\n \"Wednesday\",\n \"Wednesdays\",\n \"Thursday\",\n \"Thursdays\",\n \"Friday\",\n \"Fridays\",\n \"Saturday\",\n \"Saturdays\",\n \"Sunday\",\n \"Sundays\",\n \"January\",\n \"February\",\n # March omitted because it's a regular word too\n \"April\",\n # May omitted because it's a regular word too\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n]\n\n# Default words that need to be remapped.\n_word_map_defaults = {\n # E.g:\n # \"cash\": \"cache\",\n # This is the opposite ordering to words_to_replace.csv (the latter has the target word first)\n}\n_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})\n\n\n# phrases_to_replace is a spoken form -> written form map, used by\n# `user.replace_phrases` to rewrite words and phrases Talon recognized.\n# This does not change the priority with which Talon recognizes\n# particular phrases over others.\nphrases_to_replace = get_list_from_csv(\n \"words_to_replace.csv\",\n headers=(\"Replacement\", \"Original\"),\n default=_word_map_defaults\n)\n\n# \"dictate.word_map\" is used by `actions.dictate.replace_words`;\n# a built-in Talon action similar to `replace_phrases`, but supporting\n# only single-word replacements. Multi-word phrases are ignored.\nctx.settings[\"dictate.word_map\"] = phrases_to_replace\n\n\n# Default words that should be added to Talon's vocabulary.\n# Don't edit this. Edit 'additional_vocabulary.csv' instead\n_simple_vocab_default = [\"nmap\", \"admin\", \"Cisco\", \"Citrix\", \"VPN\", \"DNS\", \"Minecraft\"]\n\n# Defaults for different pronounciations of words that need to be added to\n# Talon's vocabulary.\n_default_vocabulary = {\n \"N map\": \"nmap\",\n \"under documented\": \"under-documented\",\n}\n_default_vocabulary.update({word: word for word in _simple_vocab_default})\n\n# \"user.vocabulary\" is used to explicitly add words/phrases that Talon doesn't\n# recognize. 
Words in user.vocabulary (or other lists and captures) are\n# \"command-like\" and their recognition is prioritized over ordinary words.\nctx.lists[\"user.vocabulary\"] = get_list_from_csv(\n \"additional_words.csv\",\n headers=(\"Word(s)\", \"Spoken Form (If Different)\"),\n default=_default_vocabulary,\n)\n\n# for quick verification of the reload\n# print(str(ctx.settings[\"dictate.word_map\"]))\n# print(str(ctx.lists[\"user.vocabulary\"]))\n\nclass PhraseReplacer:\n \"\"\"Utility for replacing phrases by other phrases inside text or word lists.\n\n Replacing longer phrases has priority.\n\n Args:\n - phrase_dict: dictionary mapping recognized/spoken forms to written forms\n \"\"\"\n\n def __init__(self, phrase_dict: Dict[str, str]):\n # Index phrases by first word, then number of subsequent words n_next\n phrase_index = dict()\n for spoken_form, written_form in phrase_dict.items():\n words = spoken_form.split()\n if not words:\n logging.warning(\"Found empty spoken form for written form\"\n f\"{written_form}, ignored\")\n continue\n first_word, n_next = words[0], len(words) - 1\n phrase_index.setdefault(first_word, {}) \\\n .setdefault(n_next, {})[tuple(words[1:])] = written_form\n\n # Sort n_next index so longer phrases have priority\n self.phrase_index = {\n first_word: list(sorted(same_first_word.items(), key=lambda x: -x[0]))\n for first_word, same_first_word in phrase_index.items()\n }\n\n def replace(self, input_words: Sequence[str]) -> Sequence[str]:\n input_words = tuple(input_words) # tuple to ensure hashability of slices\n output_words = []\n first_word_i = 0\n while first_word_i < len(input_words):\n first_word = input_words[first_word_i]\n next_word_i = first_word_i + 1\n # Could this word be the first of a phrase we should replace?\n for n_next, phrases_n_next in self.phrase_index.get(first_word, []):\n # Yes. Perhaps a phrase with n_next subsequent words?\n continuation = input_words[next_word_i : next_word_i + n_next]\n if continuation in phrases_n_next:\n # Found a match!\n output_words.append(phrases_n_next[continuation])\n first_word_i += 1 + n_next\n break\n else:\n # No match, just add the word to the result\n output_words.append(first_word)\n first_word_i += 1\n return output_words\n\n # Wrapper used for testing.\n def replace_string(self, text: str) -> str:\n return ' '.join(self.replace(text.split()))\n\n# Unit tests for PhraseReplacer\nrep = PhraseReplacer({\n 'this': 'foo',\n 'that': 'bar',\n 'this is': 'stopping early',\n 'this is a test': 'it worked!',\n})\nassert rep.replace_string('gnork') == 'gnork'\nassert rep.replace_string('this') == 'foo'\nassert rep.replace_string('this that this') == 'foo bar foo'\nassert rep.replace_string('this is a test') == 'it worked!'\nassert rep.replace_string('well this is a test really') == 'well it worked! 
really'\nassert rep.replace_string('try this is too') == 'try stopping early too'\nassert rep.replace_string('this is a tricky one') == 'stopping early a tricky one'\n\nphrase_replacer = PhraseReplacer(phrases_to_replace)\n\[email protected]_class\nclass Actions:\n def replace_phrases(words: Sequence[str]) -> Sequence[str]:\n \"\"\"Replace phrases according to words_to_replace.csv\"\"\"\n try:\n return phrase_replacer.replace(words)\n except:\n # fall back to dictate.replace_words for error-robustness\n logging.error(\"phrase replacer failed!\")\n return actions.dictate.replace_words(words)\n", "path": "code/vocabulary.py"}], "after_files": [{"content": "import logging\nfrom typing import Dict, Sequence\n\nfrom talon import Context, Module, actions\nfrom .user_settings import get_list_from_csv\n\nmod = Module()\nctx = Context()\n\nmod.list(\"vocabulary\", desc=\"additional vocabulary words\")\n\n\n# Default words that will need to be capitalized.\n# DON'T EDIT THIS. Edit settings/words_to_replace.csv instead.\n# These defaults and those later in this file are ONLY used when\n# auto-creating the corresponding settings/*.csv files. Those csv files\n# determine the contents of user.vocabulary and dictate.word_map. Once they\n# exist, the contents of the lists/dictionaries below are irrelevant.\n_capitalize_defaults = [\n # NB. the lexicon now capitalizes January/February by default, but not the\n # others below. Not sure why.\n \"January\",\n \"February\",\n # March omitted because it's a regular word too\n \"April\",\n # May omitted because it's a regular word too\n \"June\",\n \"July\",\n \"August\", # technically also an adjective but the month is far more common\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n]\n\n# Default words that need to be remapped.\n_word_map_defaults = {\n # E.g:\n # \"cash\": \"cache\",\n # This is the opposite ordering to words_to_replace.csv (the latter has the target word first)\n}\n_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})\n\n\n# phrases_to_replace is a spoken form -> written form map, used by\n# `user.replace_phrases` to rewrite words and phrases Talon recognized.\n# This does not change the priority with which Talon recognizes\n# particular phrases over others.\nphrases_to_replace = get_list_from_csv(\n \"words_to_replace.csv\",\n headers=(\"Replacement\", \"Original\"),\n default=_word_map_defaults\n)\n\n# \"dictate.word_map\" is used by `actions.dictate.replace_words`;\n# a built-in Talon action similar to `replace_phrases`, but supporting\n# only single-word replacements. Multi-word phrases are ignored.\nctx.settings[\"dictate.word_map\"] = phrases_to_replace\n\n\n# Default words that should be added to Talon's vocabulary.\n# Don't edit this. Edit 'additional_vocabulary.csv' instead\n_simple_vocab_default = [\"nmap\", \"admin\", \"Cisco\", \"Citrix\", \"VPN\", \"DNS\", \"Minecraft\"]\n\n# Defaults for different pronounciations of words that need to be added to\n# Talon's vocabulary.\n_default_vocabulary = {\n \"N map\": \"nmap\",\n \"under documented\": \"under-documented\",\n}\n_default_vocabulary.update({word: word for word in _simple_vocab_default})\n\n# \"user.vocabulary\" is used to explicitly add words/phrases that Talon doesn't\n# recognize. 
Words in user.vocabulary (or other lists and captures) are\n# \"command-like\" and their recognition is prioritized over ordinary words.\nctx.lists[\"user.vocabulary\"] = get_list_from_csv(\n \"additional_words.csv\",\n headers=(\"Word(s)\", \"Spoken Form (If Different)\"),\n default=_default_vocabulary,\n)\n\n# for quick verification of the reload\n# print(str(ctx.settings[\"dictate.word_map\"]))\n# print(str(ctx.lists[\"user.vocabulary\"]))\n\nclass PhraseReplacer:\n \"\"\"Utility for replacing phrases by other phrases inside text or word lists.\n\n Replacing longer phrases has priority.\n\n Args:\n - phrase_dict: dictionary mapping recognized/spoken forms to written forms\n \"\"\"\n\n def __init__(self, phrase_dict: Dict[str, str]):\n # Index phrases by first word, then number of subsequent words n_next\n phrase_index = dict()\n for spoken_form, written_form in phrase_dict.items():\n words = spoken_form.split()\n if not words:\n logging.warning(\"Found empty spoken form for written form\"\n f\"{written_form}, ignored\")\n continue\n first_word, n_next = words[0], len(words) - 1\n phrase_index.setdefault(first_word, {}) \\\n .setdefault(n_next, {})[tuple(words[1:])] = written_form\n\n # Sort n_next index so longer phrases have priority\n self.phrase_index = {\n first_word: list(sorted(same_first_word.items(), key=lambda x: -x[0]))\n for first_word, same_first_word in phrase_index.items()\n }\n\n def replace(self, input_words: Sequence[str]) -> Sequence[str]:\n input_words = tuple(input_words) # tuple to ensure hashability of slices\n output_words = []\n first_word_i = 0\n while first_word_i < len(input_words):\n first_word = input_words[first_word_i]\n next_word_i = first_word_i + 1\n # Could this word be the first of a phrase we should replace?\n for n_next, phrases_n_next in self.phrase_index.get(first_word, []):\n # Yes. Perhaps a phrase with n_next subsequent words?\n continuation = input_words[next_word_i : next_word_i + n_next]\n if continuation in phrases_n_next:\n # Found a match!\n output_words.append(phrases_n_next[continuation])\n first_word_i += 1 + n_next\n break\n else:\n # No match, just add the word to the result\n output_words.append(first_word)\n first_word_i += 1\n return output_words\n\n # Wrapper used for testing.\n def replace_string(self, text: str) -> str:\n return ' '.join(self.replace(text.split()))\n\n# Unit tests for PhraseReplacer\nrep = PhraseReplacer({\n 'this': 'foo',\n 'that': 'bar',\n 'this is': 'stopping early',\n 'this is a test': 'it worked!',\n})\nassert rep.replace_string('gnork') == 'gnork'\nassert rep.replace_string('this') == 'foo'\nassert rep.replace_string('this that this') == 'foo bar foo'\nassert rep.replace_string('this is a test') == 'it worked!'\nassert rep.replace_string('well this is a test really') == 'well it worked! really'\nassert rep.replace_string('try this is too') == 'try stopping early too'\nassert rep.replace_string('this is a tricky one') == 'stopping early a tricky one'\n\nphrase_replacer = PhraseReplacer(phrases_to_replace)\n\[email protected]_class\nclass Actions:\n def replace_phrases(words: Sequence[str]) -> Sequence[str]:\n \"\"\"Replace phrases according to words_to_replace.csv\"\"\"\n try:\n return phrase_replacer.replace(words)\n except:\n # fall back to dictate.replace_words for error-robustness\n logging.error(\"phrase replacer failed!\")\n return actions.dictate.replace_words(words)\n", "path": "code/vocabulary.py"}]}
| 2,216 | 408 |
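As a quick illustration of the fix above, a standalone sketch (not the talonhub module itself) of how the trimmed `_capitalize_defaults` list still seeds the spoken-form to written-form map: each capitalized word is keyed by its lowercase spoken form, and the removed weekday entries simply no longer appear.

```python
# Sketch only: mirrors the {word.lower(): word for word in _capitalize_defaults}
# seeding shown above, using the month list kept by the golden diff.
capitalize_defaults = [
    "January", "February", "April", "June", "July",
    "August", "September", "October", "November", "December",
]

word_map = {word.lower(): word for word in capitalize_defaults}

assert word_map["january"] == "January"
assert "monday" not in word_map  # weekday entries were dropped by the fix

print(word_map)
```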
gh_patches_debug_42376 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-2750 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `wemake-plain` output formatter
It should be the same as `wemake`, but without colors. Why?
When I try to save the output to file, I get this:
```
 106:29 WPS220 Found too deep nesting: 28 > 20
     \u001b[34mtry\u001b[39;49;00m:
     ^
```
And I want to have this:
```
106:29 WPS220 Found too deep nesting: 28 > 20
try:
^
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/formatter.py`
Content:
```
1 """
2 Our very own ``flake8`` formatter for better error messages.
3
4 That's how all ``flake8`` formatters work:
5
6 .. mermaid::
7 :caption: ``flake8`` formatting API calls order.
8
9 graph LR
10 F2[start] --> F3[after_init]
11 F3 --> F4[start]
12 F4 --> F5[beginning]
13 F5 --> F6[handle]
14 F6 --> F7[format]
15 F6 --> F8[show_source]
16 F6 --> F9[show_statistic]
17 F7 --> F10[finished]
18 F8 --> F10[finished]
19 F9 --> F10[finished]
20 F10 -.-> F5
21 F10 --> F11[stop]
22
23 .. autoclass:: WemakeFormatter
24 :no-undoc-members:
25
26 """
27
28 from collections import defaultdict
29 from typing import ClassVar, DefaultDict, List
30
31 from flake8.formatting.base import BaseFormatter
32 from flake8.statistics import Statistics
33 from flake8.style_guide import Violation
34 from pygments import highlight
35 from pygments.formatters import TerminalFormatter
36 from pygments.lexers import PythonLexer
37 from typing_extensions import Final
38
39 from wemake_python_styleguide.version import pkg_version
40
41 #: That url is generated and hosted by Sphinx.
42 DOCS_URL_TEMPLATE: Final = (
43 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
44 )
45
46 #: This url points to the specific violation page.
47 SHORTLINK_TEMPLATE: Final = (
48 'https://pyflak.es/{0}'
49 )
50
51
52 class WemakeFormatter(BaseFormatter): # noqa: WPS214
53 """
54 We need to format our style :term:`violations <violation>` beatifully.
55
56 The default formatter does not allow us to do that.
57 What things do we miss?
58
59 1. Spacing, everything is just mixed up and glued together
60 2. Colors and decoration, some information is easier
61 to gather just with colors or underlined text
62 3. Grouping, we need explicit grouping by filename
63 4. Incomplete and non-informative statistics
64
65 """
66
67 _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
68
69 # API:
70
71 def after_init(self):
72 """Called after the original ``init`` is used to set extra fields."""
73 self._lexer = PythonLexer()
74 self._formatter = TerminalFormatter()
75
76 # Logic:
77 self._processed_filenames: List[str] = []
78 self._error_count = 0
79
80 def handle(self, error: Violation) -> None: # noqa: WPS110
81 """Processes each :term:`violation` to print it and all related."""
82 if error.filename not in self._processed_filenames:
83 self._print_header(error.filename)
84 self._processed_filenames.append(error.filename)
85
86 line = self.format(error)
87 source = self.show_source(error)
88 link = self._show_link(error)
89
90 self._write(line)
91 if link:
92 self._write(link)
93 if source:
94 self._write(source)
95
96 self._error_count += 1
97
98 def format(self, error: Violation) -> str: # noqa: WPS125
99 """Called to format each individual :term:`violation`."""
100 return '{newline} {row_col:<8} {code:<5} {text}'.format(
101 newline=self.newline if self._should_show_source(error) else '',
102 code=error.code,
103 text=error.text,
104 row_col='{0}:{1}'.format(error.line_number, error.column_number),
105 )
106
107 def show_source(self, error: Violation) -> str:
108 """Called when ``--show-source`` option is provided."""
109 if not self._should_show_source(error):
110 return ''
111
112 formatted_line = error.physical_line.lstrip()
113 adjust = len(error.physical_line) - len(formatted_line)
114
115 code = _highlight(
116 formatted_line,
117 self._lexer,
118 self._formatter,
119 )
120
121 return ' {code} {spacing}^'.format(
122 code=code,
123 spacing=' ' * (error.column_number - 1 - adjust),
124 )
125
126 def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210
127 """Called when ``--statistic`` option is passed."""
128 all_errors = 0
129 for error_code in statistics.error_codes():
130 stats_for_error_code = statistics.statistics_for(error_code)
131 statistic = next(stats_for_error_code)
132
133 count = statistic.count
134 count += sum(stat.count for stat in stats_for_error_code)
135 all_errors += count
136 error_by_file = _count_per_filename(statistics, error_code)
137
138 self._print_violation_per_file(
139 statistic,
140 error_code,
141 count,
142 error_by_file,
143 )
144
145 self._write(self.newline)
146 self._write(_underline(_bold('All errors: {0}'.format(all_errors))))
147
148 def stop(self) -> None:
149 """Runs once per app when the formatting ends."""
150 if self._error_count:
151 message = '{0}Full list of violations and explanations:{0}{1}'
152 self._write(message.format(self.newline, self._doc_url))
153
154 # Our own methods:
155
156 def _show_link(self, error: Violation) -> str:
157 """Called when ``--show-violation-links`` option is provided."""
158 if not self.options.show_violation_links:
159 return ''
160
161 return ' {spacing}-> {link}'.format(
162 spacing=' ' * 9,
163 link=SHORTLINK_TEMPLATE.format(error.code),
164 )
165
166 def _print_header(self, filename: str) -> None:
167 self._write(
168 '{newline}{filename}'.format(
169 filename=_underline(_bold(filename)),
170 newline=self.newline,
171 ),
172 )
173
174 def _print_violation_per_file(
175 self,
176 statistic: Statistics,
177 error_code: str,
178 count: int,
179 error_by_file: DefaultDict[str, int],
180 ):
181 self._write(
182 '{newline}{error_code}: {message}'.format(
183 newline=self.newline,
184 error_code=_bold(error_code),
185 message=statistic.message,
186 ),
187 )
188 for filename, error_count in error_by_file.items():
189 self._write(
190 ' {error_count:<5} {filename}'.format(
191 error_count=error_count,
192 filename=filename,
193 ),
194 )
195 self._write(_underline('Total: {0}'.format(count)))
196
197 def _should_show_source(self, error: Violation) -> bool:
198 return self.options.show_source and error.physical_line is not None
199
200
201 # Formatting text:
202
203 def _bold(text: str) -> str:
204 r"""
205 Returns bold formatted text.
206
207 >>> _bold('Hello!')
208 '\x1b[1mHello!\x1b[0m'
209
210 """
211 return '\033[1m{0}\033[0m'.format(text)
212
213
214 def _underline(text: str) -> str:
215 r"""
216 Returns underlined formatted text.
217
218 >>> _underline('Hello!')
219 '\x1b[4mHello!\x1b[0m'
220
221 """
222 return '\033[4m{0}\033[0m'.format(text)
223
224
225 def _highlight(source: str, lexer, formatter) -> str:
226 """
227 Highlights source code. Might fail.
228
229 See also:
230 https://github.com/wemake-services/wemake-python-styleguide/issues/794
231
232 """
233 try:
234 return highlight(source, lexer, formatter)
235 except Exception: # pragma: no cover
236 # Might fail on some systems, when colors are set incorrectly,
237 # or not available at all. In this case code will be just text.
238 return source
239
240
241 # Helpers:
242
243 def _count_per_filename(
244 statistics: Statistics,
245 error_code: str,
246 ) -> DefaultDict[str, int]:
247 filenames: DefaultDict[str, int] = defaultdict(int)
248 stats_for_error_code = statistics.statistics_for(error_code)
249
250 for stat in stats_for_error_code:
251 filenames[stat.filename] += stat.count
252
253 return filenames
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py
--- a/wemake_python_styleguide/formatter.py
+++ b/wemake_python_styleguide/formatter.py
@@ -26,7 +26,8 @@
"""
from collections import defaultdict
-from typing import ClassVar, DefaultDict, List
+from os import environ
+from typing import ClassVar, DefaultDict, Final, List
from flake8.formatting.base import BaseFormatter
from flake8.statistics import Statistics
@@ -34,19 +35,20 @@
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
-from typing_extensions import Final
from wemake_python_styleguide.version import pkg_version
#: That url is generated and hosted by Sphinx.
-DOCS_URL_TEMPLATE: Final = (
+_DOCS_URL_TEMPLATE: Final = (
'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
)
#: This url points to the specific violation page.
-SHORTLINK_TEMPLATE: Final = (
- 'https://pyflak.es/{0}'
-)
+_SHORTLINK_TEMPLATE: Final = 'https://pyflak.es/{0}'
+
+#: Option to disable any code highlight and text output format.
+#: See https://no-color.org
+_NO_COLOR: Final = environ.get('NO_COLOR', '0') == '1'
class WemakeFormatter(BaseFormatter): # noqa: WPS214
@@ -64,7 +66,7 @@
"""
- _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
+ _doc_url: ClassVar[str] = _DOCS_URL_TEMPLATE.format(pkg_version)
# API:
@@ -160,7 +162,7 @@
return ' {spacing}-> {link}'.format(
spacing=' ' * 9,
- link=SHORTLINK_TEMPLATE.format(error.code),
+ link=_SHORTLINK_TEMPLATE.format(error.code),
)
def _print_header(self, filename: str) -> None:
@@ -200,36 +202,61 @@
# Formatting text:
-def _bold(text: str) -> str:
+def _bold(text: str, *, no_color: bool = _NO_COLOR) -> str:
r"""
Returns bold formatted text.
>>> _bold('Hello!')
'\x1b[1mHello!\x1b[0m'
+ Returns non-formatted text if environment variable ``NO_COLOR=1``.
+
+ >>> _bold('Hello!', no_color=True)
+ 'Hello!'
+
"""
+ if no_color:
+ return text
return '\033[1m{0}\033[0m'.format(text)
-def _underline(text: str) -> str:
+def _underline(text: str, *, no_color: bool = _NO_COLOR) -> str:
r"""
Returns underlined formatted text.
>>> _underline('Hello!')
'\x1b[4mHello!\x1b[0m'
+ Returns non-formatted text if environment variable ``NO_COLOR=1``.
+
+ >>> _underline('Hello!', no_color=True)
+ 'Hello!'
+
"""
+ if no_color:
+ return text
return '\033[4m{0}\033[0m'.format(text)
-def _highlight(source: str, lexer, formatter) -> str:
+def _highlight(
+ source: str,
+ lexer: PythonLexer,
+ formatter: TerminalFormatter,
+ *,
+ no_color: bool = _NO_COLOR,
+) -> str:
"""
Highlights source code. Might fail.
+ Returns non-formatted text if environment variable ``NO_COLOR=1``.
+
See also:
https://github.com/wemake-services/wemake-python-styleguide/issues/794
+ https://no-color.org
"""
+ if no_color:
+ return source
try:
return highlight(source, lexer, formatter)
except Exception: # pragma: no cover
|
{"golden_diff": "diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py\n--- a/wemake_python_styleguide/formatter.py\n+++ b/wemake_python_styleguide/formatter.py\n@@ -26,7 +26,8 @@\n \"\"\"\n \n from collections import defaultdict\n-from typing import ClassVar, DefaultDict, List\n+from os import environ\n+from typing import ClassVar, DefaultDict, Final, List\n \n from flake8.formatting.base import BaseFormatter\n from flake8.statistics import Statistics\n@@ -34,19 +35,20 @@\n from pygments import highlight\n from pygments.formatters import TerminalFormatter\n from pygments.lexers import PythonLexer\n-from typing_extensions import Final\n \n from wemake_python_styleguide.version import pkg_version\n \n #: That url is generated and hosted by Sphinx.\n-DOCS_URL_TEMPLATE: Final = (\n+_DOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n )\n \n #: This url points to the specific violation page.\n-SHORTLINK_TEMPLATE: Final = (\n- 'https://pyflak.es/{0}'\n-)\n+_SHORTLINK_TEMPLATE: Final = 'https://pyflak.es/{0}'\n+\n+#: Option to disable any code highlight and text output format.\n+#: See https://no-color.org\n+_NO_COLOR: Final = environ.get('NO_COLOR', '0') == '1'\n \n \n class WemakeFormatter(BaseFormatter): # noqa: WPS214\n@@ -64,7 +66,7 @@\n \n \"\"\"\n \n- _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)\n+ _doc_url: ClassVar[str] = _DOCS_URL_TEMPLATE.format(pkg_version)\n \n # API:\n \n@@ -160,7 +162,7 @@\n \n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n- link=SHORTLINK_TEMPLATE.format(error.code),\n+ link=_SHORTLINK_TEMPLATE.format(error.code),\n )\n \n def _print_header(self, filename: str) -> None:\n@@ -200,36 +202,61 @@\n \n # Formatting text:\n \n-def _bold(text: str) -> str:\n+def _bold(text: str, *, no_color: bool = _NO_COLOR) -> str:\n r\"\"\"\n Returns bold formatted text.\n \n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n \n+ Returns non-formatted text if environment variable ``NO_COLOR=1``.\n+\n+ >>> _bold('Hello!', no_color=True)\n+ 'Hello!'\n+\n \"\"\"\n+ if no_color:\n+ return text\n return '\\033[1m{0}\\033[0m'.format(text)\n \n \n-def _underline(text: str) -> str:\n+def _underline(text: str, *, no_color: bool = _NO_COLOR) -> str:\n r\"\"\"\n Returns underlined formatted text.\n \n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n \n+ Returns non-formatted text if environment variable ``NO_COLOR=1``.\n+\n+ >>> _underline('Hello!', no_color=True)\n+ 'Hello!'\n+\n \"\"\"\n+ if no_color:\n+ return text\n return '\\033[4m{0}\\033[0m'.format(text)\n \n \n-def _highlight(source: str, lexer, formatter) -> str:\n+def _highlight(\n+ source: str,\n+ lexer: PythonLexer,\n+ formatter: TerminalFormatter,\n+ *,\n+ no_color: bool = _NO_COLOR,\n+) -> str:\n \"\"\"\n Highlights source code. Might fail.\n \n+ Returns non-formatted text if environment variable ``NO_COLOR=1``.\n+\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n+ https://no-color.org\n \n \"\"\"\n+ if no_color:\n+ return source\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n", "issue": "Add `wemake-plain` output formatter\nIt should be the same as `wemake`, but without colors. 
Why?\r\n\r\nWhen I try to save the output to file, I get this:\r\n\r\n```\r\n 106:29 WPS220 Found too deep nesting: 28 > 20\r\n \u001b[34mtry\u001b[39;49;00m:\r\n ^\r\n```\r\n\r\nAnd I want to have this:\r\n\r\n```\r\n 106:29 WPS220 Found too deep nesting: 28 > 20\r\n try:\r\n ^\r\n```\n", "before_files": [{"content": "\"\"\"\nOur very own ``flake8`` formatter for better error messages.\n\nThat's how all ``flake8`` formatters work:\n\n.. mermaid::\n :caption: ``flake8`` formatting API calls order.\n\n graph LR\n F2[start] --> F3[after_init]\n F3 --> F4[start]\n F4 --> F5[beginning]\n F5 --> F6[handle]\n F6 --> F7[format]\n F6\t --> F8[show_source]\n F6\t --> F9[show_statistic]\n F7 --> F10[finished]\n F8 --> F10[finished]\n F9 --> F10[finished]\n F10 -.-> F5\n F10 --> F11[stop]\n\n.. autoclass:: WemakeFormatter\n :no-undoc-members:\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List\n\nfrom flake8.formatting.base import BaseFormatter\nfrom flake8.statistics import Statistics\nfrom flake8.style_guide import Violation\nfrom pygments import highlight\nfrom pygments.formatters import TerminalFormatter\nfrom pygments.lexers import PythonLexer\nfrom typing_extensions import Final\n\nfrom wemake_python_styleguide.version import pkg_version\n\n#: That url is generated and hosted by Sphinx.\nDOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n)\n\n#: This url points to the specific violation page.\nSHORTLINK_TEMPLATE: Final = (\n 'https://pyflak.es/{0}'\n)\n\n\nclass WemakeFormatter(BaseFormatter): # noqa: WPS214\n \"\"\"\n We need to format our style :term:`violations <violation>` beatifully.\n\n The default formatter does not allow us to do that.\n What things do we miss?\n\n 1. Spacing, everything is just mixed up and glued together\n 2. Colors and decoration, some information is easier\n to gather just with colors or underlined text\n 3. Grouping, we need explicit grouping by filename\n 4. 
Incomplete and non-informative statistics\n\n \"\"\"\n\n _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)\n\n # API:\n\n def after_init(self):\n \"\"\"Called after the original ``init`` is used to set extra fields.\"\"\"\n self._lexer = PythonLexer()\n self._formatter = TerminalFormatter()\n\n # Logic:\n self._processed_filenames: List[str] = []\n self._error_count = 0\n\n def handle(self, error: Violation) -> None: # noqa: WPS110\n \"\"\"Processes each :term:`violation` to print it and all related.\"\"\"\n if error.filename not in self._processed_filenames:\n self._print_header(error.filename)\n self._processed_filenames.append(error.filename)\n\n line = self.format(error)\n source = self.show_source(error)\n link = self._show_link(error)\n\n self._write(line)\n if link:\n self._write(link)\n if source:\n self._write(source)\n\n self._error_count += 1\n\n def format(self, error: Violation) -> str: # noqa: WPS125\n \"\"\"Called to format each individual :term:`violation`.\"\"\"\n return '{newline} {row_col:<8} {code:<5} {text}'.format(\n newline=self.newline if self._should_show_source(error) else '',\n code=error.code,\n text=error.text,\n row_col='{0}:{1}'.format(error.line_number, error.column_number),\n )\n\n def show_source(self, error: Violation) -> str:\n \"\"\"Called when ``--show-source`` option is provided.\"\"\"\n if not self._should_show_source(error):\n return ''\n\n formatted_line = error.physical_line.lstrip()\n adjust = len(error.physical_line) - len(formatted_line)\n\n code = _highlight(\n formatted_line,\n self._lexer,\n self._formatter,\n )\n\n return ' {code} {spacing}^'.format(\n code=code,\n spacing=' ' * (error.column_number - 1 - adjust),\n )\n\n def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210\n \"\"\"Called when ``--statistic`` option is passed.\"\"\"\n all_errors = 0\n for error_code in statistics.error_codes():\n stats_for_error_code = statistics.statistics_for(error_code)\n statistic = next(stats_for_error_code)\n\n count = statistic.count\n count += sum(stat.count for stat in stats_for_error_code)\n all_errors += count\n error_by_file = _count_per_filename(statistics, error_code)\n\n self._print_violation_per_file(\n statistic,\n error_code,\n count,\n error_by_file,\n )\n\n self._write(self.newline)\n self._write(_underline(_bold('All errors: {0}'.format(all_errors))))\n\n def stop(self) -> None:\n \"\"\"Runs once per app when the formatting ends.\"\"\"\n if self._error_count:\n message = '{0}Full list of violations and explanations:{0}{1}'\n self._write(message.format(self.newline, self._doc_url))\n\n # Our own methods:\n\n def _show_link(self, error: Violation) -> str:\n \"\"\"Called when ``--show-violation-links`` option is provided.\"\"\"\n if not self.options.show_violation_links:\n return ''\n\n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n link=SHORTLINK_TEMPLATE.format(error.code),\n )\n\n def _print_header(self, filename: str) -> None:\n self._write(\n '{newline}{filename}'.format(\n filename=_underline(_bold(filename)),\n newline=self.newline,\n ),\n )\n\n def _print_violation_per_file(\n self,\n statistic: Statistics,\n error_code: str,\n count: int,\n error_by_file: DefaultDict[str, int],\n ):\n self._write(\n '{newline}{error_code}: {message}'.format(\n newline=self.newline,\n error_code=_bold(error_code),\n message=statistic.message,\n ),\n )\n for filename, error_count in error_by_file.items():\n self._write(\n ' {error_count:<5} {filename}'.format(\n error_count=error_count,\n 
filename=filename,\n ),\n )\n self._write(_underline('Total: {0}'.format(count)))\n\n def _should_show_source(self, error: Violation) -> bool:\n return self.options.show_source and error.physical_line is not None\n\n\n# Formatting text:\n\ndef _bold(text: str) -> str:\n r\"\"\"\n Returns bold formatted text.\n\n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[1m{0}\\033[0m'.format(text)\n\n\ndef _underline(text: str) -> str:\n r\"\"\"\n Returns underlined formatted text.\n\n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[4m{0}\\033[0m'.format(text)\n\n\ndef _highlight(source: str, lexer, formatter) -> str:\n \"\"\"\n Highlights source code. Might fail.\n\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n\n \"\"\"\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n # Might fail on some systems, when colors are set incorrectly,\n # or not available at all. In this case code will be just text.\n return source\n\n\n# Helpers:\n\ndef _count_per_filename(\n statistics: Statistics,\n error_code: str,\n) -> DefaultDict[str, int]:\n filenames: DefaultDict[str, int] = defaultdict(int)\n stats_for_error_code = statistics.statistics_for(error_code)\n\n for stat in stats_for_error_code:\n filenames[stat.filename] += stat.count\n\n return filenames\n", "path": "wemake_python_styleguide/formatter.py"}], "after_files": [{"content": "\"\"\"\nOur very own ``flake8`` formatter for better error messages.\n\nThat's how all ``flake8`` formatters work:\n\n.. mermaid::\n :caption: ``flake8`` formatting API calls order.\n\n graph LR\n F2[start] --> F3[after_init]\n F3 --> F4[start]\n F4 --> F5[beginning]\n F5 --> F6[handle]\n F6 --> F7[format]\n F6\t --> F8[show_source]\n F6\t --> F9[show_statistic]\n F7 --> F10[finished]\n F8 --> F10[finished]\n F9 --> F10[finished]\n F10 -.-> F5\n F10 --> F11[stop]\n\n.. autoclass:: WemakeFormatter\n :no-undoc-members:\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom os import environ\nfrom typing import ClassVar, DefaultDict, Final, List\n\nfrom flake8.formatting.base import BaseFormatter\nfrom flake8.statistics import Statistics\nfrom flake8.style_guide import Violation\nfrom pygments import highlight\nfrom pygments.formatters import TerminalFormatter\nfrom pygments.lexers import PythonLexer\n\nfrom wemake_python_styleguide.version import pkg_version\n\n#: That url is generated and hosted by Sphinx.\n_DOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n)\n\n#: This url points to the specific violation page.\n_SHORTLINK_TEMPLATE: Final = 'https://pyflak.es/{0}'\n\n#: Option to disable any code highlight and text output format.\n#: See https://no-color.org\n_NO_COLOR: Final = environ.get('NO_COLOR', '0') == '1'\n\n\nclass WemakeFormatter(BaseFormatter): # noqa: WPS214\n \"\"\"\n We need to format our style :term:`violations <violation>` beatifully.\n\n The default formatter does not allow us to do that.\n What things do we miss?\n\n 1. Spacing, everything is just mixed up and glued together\n 2. Colors and decoration, some information is easier\n to gather just with colors or underlined text\n 3. Grouping, we need explicit grouping by filename\n 4. 
Incomplete and non-informative statistics\n\n \"\"\"\n\n _doc_url: ClassVar[str] = _DOCS_URL_TEMPLATE.format(pkg_version)\n\n # API:\n\n def after_init(self):\n \"\"\"Called after the original ``init`` is used to set extra fields.\"\"\"\n self._lexer = PythonLexer()\n self._formatter = TerminalFormatter()\n\n # Logic:\n self._processed_filenames: List[str] = []\n self._error_count = 0\n\n def handle(self, error: Violation) -> None: # noqa: WPS110\n \"\"\"Processes each :term:`violation` to print it and all related.\"\"\"\n if error.filename not in self._processed_filenames:\n self._print_header(error.filename)\n self._processed_filenames.append(error.filename)\n\n line = self.format(error)\n source = self.show_source(error)\n link = self._show_link(error)\n\n self._write(line)\n if link:\n self._write(link)\n if source:\n self._write(source)\n\n self._error_count += 1\n\n def format(self, error: Violation) -> str: # noqa: WPS125\n \"\"\"Called to format each individual :term:`violation`.\"\"\"\n return '{newline} {row_col:<8} {code:<5} {text}'.format(\n newline=self.newline if self._should_show_source(error) else '',\n code=error.code,\n text=error.text,\n row_col='{0}:{1}'.format(error.line_number, error.column_number),\n )\n\n def show_source(self, error: Violation) -> str:\n \"\"\"Called when ``--show-source`` option is provided.\"\"\"\n if not self._should_show_source(error):\n return ''\n\n formatted_line = error.physical_line.lstrip()\n adjust = len(error.physical_line) - len(formatted_line)\n\n code = _highlight(\n formatted_line,\n self._lexer,\n self._formatter,\n )\n\n return ' {code} {spacing}^'.format(\n code=code,\n spacing=' ' * (error.column_number - 1 - adjust),\n )\n\n def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210\n \"\"\"Called when ``--statistic`` option is passed.\"\"\"\n all_errors = 0\n for error_code in statistics.error_codes():\n stats_for_error_code = statistics.statistics_for(error_code)\n statistic = next(stats_for_error_code)\n\n count = statistic.count\n count += sum(stat.count for stat in stats_for_error_code)\n all_errors += count\n error_by_file = _count_per_filename(statistics, error_code)\n\n self._print_violation_per_file(\n statistic,\n error_code,\n count,\n error_by_file,\n )\n\n self._write(self.newline)\n self._write(_underline(_bold('All errors: {0}'.format(all_errors))))\n\n def stop(self) -> None:\n \"\"\"Runs once per app when the formatting ends.\"\"\"\n if self._error_count:\n message = '{0}Full list of violations and explanations:{0}{1}'\n self._write(message.format(self.newline, self._doc_url))\n\n # Our own methods:\n\n def _show_link(self, error: Violation) -> str:\n \"\"\"Called when ``--show-violation-links`` option is provided.\"\"\"\n if not self.options.show_violation_links:\n return ''\n\n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n link=_SHORTLINK_TEMPLATE.format(error.code),\n )\n\n def _print_header(self, filename: str) -> None:\n self._write(\n '{newline}{filename}'.format(\n filename=_underline(_bold(filename)),\n newline=self.newline,\n ),\n )\n\n def _print_violation_per_file(\n self,\n statistic: Statistics,\n error_code: str,\n count: int,\n error_by_file: DefaultDict[str, int],\n ):\n self._write(\n '{newline}{error_code}: {message}'.format(\n newline=self.newline,\n error_code=_bold(error_code),\n message=statistic.message,\n ),\n )\n for filename, error_count in error_by_file.items():\n self._write(\n ' {error_count:<5} {filename}'.format(\n error_count=error_count,\n 
filename=filename,\n ),\n )\n self._write(_underline('Total: {0}'.format(count)))\n\n def _should_show_source(self, error: Violation) -> bool:\n return self.options.show_source and error.physical_line is not None\n\n\n# Formatting text:\n\ndef _bold(text: str, *, no_color: bool = _NO_COLOR) -> str:\n r\"\"\"\n Returns bold formatted text.\n\n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n\n Returns non-formatted text if environment variable ``NO_COLOR=1``.\n\n >>> _bold('Hello!', no_color=True)\n 'Hello!'\n\n \"\"\"\n if no_color:\n return text\n return '\\033[1m{0}\\033[0m'.format(text)\n\n\ndef _underline(text: str, *, no_color: bool = _NO_COLOR) -> str:\n r\"\"\"\n Returns underlined formatted text.\n\n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n\n Returns non-formatted text if environment variable ``NO_COLOR=1``.\n\n >>> _underline('Hello!', no_color=True)\n 'Hello!'\n\n \"\"\"\n if no_color:\n return text\n return '\\033[4m{0}\\033[0m'.format(text)\n\n\ndef _highlight(\n source: str,\n lexer: PythonLexer,\n formatter: TerminalFormatter,\n *,\n no_color: bool = _NO_COLOR,\n) -> str:\n \"\"\"\n Highlights source code. Might fail.\n\n Returns non-formatted text if environment variable ``NO_COLOR=1``.\n\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n https://no-color.org\n\n \"\"\"\n if no_color:\n return source\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n # Might fail on some systems, when colors are set incorrectly,\n # or not available at all. In this case code will be just text.\n return source\n\n\n# Helpers:\n\ndef _count_per_filename(\n statistics: Statistics,\n error_code: str,\n) -> DefaultDict[str, int]:\n filenames: DefaultDict[str, int] = defaultdict(int)\n stats_for_error_code = statistics.statistics_for(error_code)\n\n for stat in stats_for_error_code:\n filenames[stat.filename] += stat.count\n\n return filenames\n", "path": "wemake_python_styleguide/formatter.py"}]}
| 2,891 | 926 |
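For reference, a standalone sketch of the `NO_COLOR` gate introduced by the diff above: when the environment variable `NO_COLOR=1` is set, the text helpers return plain strings instead of ANSI-escaped ones. The helper names below are illustrative and are not the wemake-python-styleguide formatter itself.

```python
# Sketch only: plain-text fallback controlled by NO_COLOR, as in the diff above.
from os import environ

NO_COLOR = environ.get("NO_COLOR", "0") == "1"


def bold(text: str, *, no_color: bool = NO_COLOR) -> str:
    return text if no_color else "\033[1m{0}\033[0m".format(text)


def underline(text: str, *, no_color: bool = NO_COLOR) -> str:
    return text if no_color else "\033[4m{0}\033[0m".format(text)


if __name__ == "__main__":
    print(bold("All errors: 3"))                 # ANSI bold unless NO_COLOR=1
    print(underline("Total: 3", no_color=True))  # always plain text
```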
gh_patches_debug_9546 | rasdani/github-patches | git_diff | fossasia__open-event-server-5266 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User order_expiry_time as the parameter to expire orders
**Describe the bug**
Currently we are expiring orders after 10 minutes. We should change it to order_expiry_time parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/order.py`
Content:
```
1 import logging
2 from datetime import timedelta, datetime, timezone
3
4 from flask import render_template
5
6 from app.api.helpers import ticketing
7 from app.api.helpers.db import save_to_db, safe_query_without_soft_deleted_entries, get_count
8 from app.api.helpers.exceptions import UnprocessableEntity, ConflictException
9 from app.api.helpers.files import create_save_pdf
10 from app.api.helpers.storage import UPLOAD_PATHS
11 from app.models import db
12 from app.models.ticket import Ticket
13 from app.models.ticket_holder import TicketHolder
14
15
16 def delete_related_attendees_for_order(order):
17 """
18 Delete the associated attendees of an order when it is cancelled/deleted/expired
19 :param order: Order whose attendees have to be deleted.
20 :return:
21 """
22 for ticket_holder in order.ticket_holders:
23 db.session.delete(ticket_holder)
24 try:
25 db.session.commit()
26 except Exception as e:
27 logging.error('DB Exception! %s' % e)
28 db.session.rollback()
29
30
31 def set_expiry_for_order(order, override=False):
32 """
33 Expire the order after the time slot(10 minutes) if the order is pending.
34 Also expires the order if we want to expire an order regardless of the state and time.
35 :param order: Order to be expired.
36 :param override: flag to force expiry.
37 :return:
38 """
39 if order and not order.paid_via and (override or (order.status == 'pending' and (
40 order.created_at +
41 timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):
42 order.status = 'expired'
43 delete_related_attendees_for_order(order)
44 save_to_db(order)
45 return order
46
47
48 def create_pdf_tickets_for_holder(order):
49 """
50 Create tickets for the holders of an order.
51 :param order: The order for which to create tickets for.
52 """
53 if order.status == 'completed':
54 pdf = create_save_pdf(render_template('pdf/ticket_purchaser.html', order=order),
55 UPLOAD_PATHS['pdf']['ticket_attendee'],
56 dir_path='/static/uploads/pdf/tickets/')
57 order.tickets_pdf_url = pdf
58
59 for holder in order.ticket_holders:
60 if (not holder.user) or holder.user.id != order.user_id:
61 # holder is not the order buyer.
62 pdf = create_save_pdf(render_template('pdf/ticket_attendee.html', order=order, holder=holder),
63 UPLOAD_PATHS['pdf']['ticket_attendee'],
64 dir_path='/static/uploads/pdf/tickets/')
65 else:
66 # holder is the order buyer.
67 pdf = order.tickets_pdf_url
68 holder.pdf_url = pdf
69 save_to_db(holder)
70
71 save_to_db(order)
72
73
74 def create_onsite_attendees_for_order(data):
75 """
76 Creates on site ticket holders for an order and adds it into the request data.
77 :param data: data initially passed in the POST request for order.
78 :return:
79 """
80 on_site_tickets = data.get('on_site_tickets')
81
82 if not on_site_tickets:
83 raise UnprocessableEntity({'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing')
84
85 data['ticket_holders'] = []
86
87 for on_site_ticket in on_site_tickets:
88 ticket_id = on_site_ticket['id']
89 quantity = int(on_site_ticket['quantity'])
90
91 ticket = safe_query_without_soft_deleted_entries(db, Ticket, 'id', ticket_id, 'ticket_id')
92
93 ticket_sold_count = get_count(db.session.query(TicketHolder.id).
94 filter_by(ticket_id=int(ticket.id), deleted_at=None))
95
96 # Check if the ticket is already sold out or not.
97 if ticket_sold_count + quantity > ticket.quantity:
98 # delete the already created attendees.
99 for holder in data['ticket_holders']:
100 ticket_holder = db.session.query(TicketHolder).filter(id == int(holder)).one()
101 db.session.delete(ticket_holder)
102 try:
103 db.session.commit()
104 except Exception as e:
105 logging.error('DB Exception! %s' % e)
106 db.session.rollback()
107
108 raise ConflictException(
109 {'pointer': '/data/attributes/on_site_tickets'},
110 "Ticket with id: {} already sold out. You can buy at most {} tickets".format(ticket_id,
111 ticket.quantity -
112 ticket_sold_count)
113 )
114
115 for _ in range(1, quantity):
116 ticket_holder = TicketHolder(firstname='onsite', lastname='attendee', email='[email protected]',
117 ticket_id=ticket.id, event_id=data.get('event'))
118 save_to_db(ticket_holder)
119 data['ticket_holders'].append(ticket_holder.id)
120
121 # delete from the data.
122 del data['on_site_tickets']
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py
--- a/app/api/helpers/order.py
+++ b/app/api/helpers/order.py
@@ -38,7 +38,7 @@
"""
if order and not order.paid_via and (override or (order.status == 'pending' and (
order.created_at +
- timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):
+ timedelta(minutes=order.event.order_expiry_time)) < datetime.now(timezone.utc))):
order.status = 'expired'
delete_related_attendees_for_order(order)
save_to_db(order)
|
{"golden_diff": "diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py\n--- a/app/api/helpers/order.py\n+++ b/app/api/helpers/order.py\n@@ -38,7 +38,7 @@\n \"\"\"\n if order and not order.paid_via and (override or (order.status == 'pending' and (\n order.created_at +\n- timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):\n+ timedelta(minutes=order.event.order_expiry_time)) < datetime.now(timezone.utc))):\n order.status = 'expired'\n delete_related_attendees_for_order(order)\n save_to_db(order)\n", "issue": "User order_expiry_time as the parameter to expire orders\n**Describe the bug**\r\nCurrently we are expiring orders after 10 minutes. We should change it to order_expiry_time parameter. \n", "before_files": [{"content": "import logging\nfrom datetime import timedelta, datetime, timezone\n\nfrom flask import render_template\n\nfrom app.api.helpers import ticketing\nfrom app.api.helpers.db import save_to_db, safe_query_without_soft_deleted_entries, get_count\nfrom app.api.helpers.exceptions import UnprocessableEntity, ConflictException\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\n\n\ndef delete_related_attendees_for_order(order):\n \"\"\"\n Delete the associated attendees of an order when it is cancelled/deleted/expired\n :param order: Order whose attendees have to be deleted.\n :return:\n \"\"\"\n for ticket_holder in order.ticket_holders:\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! %s' % e)\n db.session.rollback()\n\n\ndef set_expiry_for_order(order, override=False):\n \"\"\"\n Expire the order after the time slot(10 minutes) if the order is pending.\n Also expires the order if we want to expire an order regardless of the state and time.\n :param order: Order to be expired.\n :param override: flag to force expiry.\n :return:\n \"\"\"\n if order and not order.paid_via and (override or (order.status == 'pending' and (\n order.created_at +\n timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):\n order.status = 'expired'\n delete_related_attendees_for_order(order)\n save_to_db(order)\n return order\n\n\ndef create_pdf_tickets_for_holder(order):\n \"\"\"\n Create tickets for the holders of an order.\n :param order: The order for which to create tickets for.\n \"\"\"\n if order.status == 'completed':\n pdf = create_save_pdf(render_template('pdf/ticket_purchaser.html', order=order),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n order.tickets_pdf_url = pdf\n\n for holder in order.ticket_holders:\n if (not holder.user) or holder.user.id != order.user_id:\n # holder is not the order buyer.\n pdf = create_save_pdf(render_template('pdf/ticket_attendee.html', order=order, holder=holder),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n else:\n # holder is the order buyer.\n pdf = order.tickets_pdf_url\n holder.pdf_url = pdf\n save_to_db(holder)\n\n save_to_db(order)\n\n\ndef create_onsite_attendees_for_order(data):\n \"\"\"\n Creates on site ticket holders for an order and adds it into the request data.\n :param data: data initially passed in the POST request for order.\n :return:\n \"\"\"\n on_site_tickets = data.get('on_site_tickets')\n\n if not on_site_tickets:\n raise 
UnprocessableEntity({'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing')\n\n data['ticket_holders'] = []\n\n for on_site_ticket in on_site_tickets:\n ticket_id = on_site_ticket['id']\n quantity = int(on_site_ticket['quantity'])\n\n ticket = safe_query_without_soft_deleted_entries(db, Ticket, 'id', ticket_id, 'ticket_id')\n\n ticket_sold_count = get_count(db.session.query(TicketHolder.id).\n filter_by(ticket_id=int(ticket.id), deleted_at=None))\n\n # Check if the ticket is already sold out or not.\n if ticket_sold_count + quantity > ticket.quantity:\n # delete the already created attendees.\n for holder in data['ticket_holders']:\n ticket_holder = db.session.query(TicketHolder).filter(id == int(holder)).one()\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! %s' % e)\n db.session.rollback()\n\n raise ConflictException(\n {'pointer': '/data/attributes/on_site_tickets'},\n \"Ticket with id: {} already sold out. You can buy at most {} tickets\".format(ticket_id,\n ticket.quantity -\n ticket_sold_count)\n )\n\n for _ in range(1, quantity):\n ticket_holder = TicketHolder(firstname='onsite', lastname='attendee', email='[email protected]',\n ticket_id=ticket.id, event_id=data.get('event'))\n save_to_db(ticket_holder)\n data['ticket_holders'].append(ticket_holder.id)\n\n # delete from the data.\n del data['on_site_tickets']\n", "path": "app/api/helpers/order.py"}], "after_files": [{"content": "import logging\nfrom datetime import timedelta, datetime, timezone\n\nfrom flask import render_template\n\nfrom app.api.helpers import ticketing\nfrom app.api.helpers.db import save_to_db, safe_query_without_soft_deleted_entries, get_count\nfrom app.api.helpers.exceptions import UnprocessableEntity, ConflictException\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\n\n\ndef delete_related_attendees_for_order(order):\n \"\"\"\n Delete the associated attendees of an order when it is cancelled/deleted/expired\n :param order: Order whose attendees have to be deleted.\n :return:\n \"\"\"\n for ticket_holder in order.ticket_holders:\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! 
%s' % e)\n db.session.rollback()\n\n\ndef set_expiry_for_order(order, override=False):\n \"\"\"\n Expire the order after the time slot(10 minutes) if the order is pending.\n Also expires the order if we want to expire an order regardless of the state and time.\n :param order: Order to be expired.\n :param override: flag to force expiry.\n :return:\n \"\"\"\n if order and not order.paid_via and (override or (order.status == 'pending' and (\n order.created_at +\n timedelta(minutes=order.event.order_expiry_time)) < datetime.now(timezone.utc))):\n order.status = 'expired'\n delete_related_attendees_for_order(order)\n save_to_db(order)\n return order\n\n\ndef create_pdf_tickets_for_holder(order):\n \"\"\"\n Create tickets for the holders of an order.\n :param order: The order for which to create tickets for.\n \"\"\"\n if order.status == 'completed':\n pdf = create_save_pdf(render_template('pdf/ticket_purchaser.html', order=order),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n order.tickets_pdf_url = pdf\n\n for holder in order.ticket_holders:\n if (not holder.user) or holder.user.id != order.user_id:\n # holder is not the order buyer.\n pdf = create_save_pdf(render_template('pdf/ticket_attendee.html', order=order, holder=holder),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n else:\n # holder is the order buyer.\n pdf = order.tickets_pdf_url\n holder.pdf_url = pdf\n save_to_db(holder)\n\n save_to_db(order)\n\n\ndef create_onsite_attendees_for_order(data):\n \"\"\"\n Creates on site ticket holders for an order and adds it into the request data.\n :param data: data initially passed in the POST request for order.\n :return:\n \"\"\"\n on_site_tickets = data.get('on_site_tickets')\n\n if not on_site_tickets:\n raise UnprocessableEntity({'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing')\n\n data['ticket_holders'] = []\n\n for on_site_ticket in on_site_tickets:\n ticket_id = on_site_ticket['id']\n quantity = int(on_site_ticket['quantity'])\n\n ticket = safe_query_without_soft_deleted_entries(db, Ticket, 'id', ticket_id, 'ticket_id')\n\n ticket_sold_count = get_count(db.session.query(TicketHolder.id).\n filter_by(ticket_id=int(ticket.id), deleted_at=None))\n\n # Check if the ticket is already sold out or not.\n if ticket_sold_count + quantity > ticket.quantity:\n # delete the already created attendees.\n for holder in data['ticket_holders']:\n ticket_holder = db.session.query(TicketHolder).filter(id == int(holder)).one()\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! %s' % e)\n db.session.rollback()\n\n raise ConflictException(\n {'pointer': '/data/attributes/on_site_tickets'},\n \"Ticket with id: {} already sold out. You can buy at most {} tickets\".format(ticket_id,\n ticket.quantity -\n ticket_sold_count)\n )\n\n for _ in range(1, quantity):\n ticket_holder = TicketHolder(firstname='onsite', lastname='attendee', email='[email protected]',\n ticket_id=ticket.id, event_id=data.get('event'))\n save_to_db(ticket_holder)\n data['ticket_holders'].append(ticket_holder.id)\n\n # delete from the data.\n del data['on_site_tickets']\n", "path": "app/api/helpers/order.py"}]}
| 1,564 | 136 |
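The order-expiry diff above replaces the global ticketing timeout with the event's own `order_expiry_time`. Below is a minimal, self-contained sketch of that per-event expiry check; the `Event`/`Order` dataclasses and their field defaults are hypothetical stand-ins invented for the example, not the project's real SQLAlchemy models.

```python
# Minimal sketch of a per-event order expiry check (hypothetical stand-in models,
# not the project's ORM classes).
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Optional


@dataclass
class Event:
    order_expiry_time: int = 10  # minutes; assumed per-event setting


@dataclass
class Order:
    event: Event
    created_at: datetime
    status: str = "pending"
    paid_via: Optional[str] = None


def set_expiry_for_order(order: Order, override: bool = False) -> Order:
    """Expire a pending, unpaid order once its event-specific window has passed."""
    past_window = order.created_at + timedelta(
        minutes=order.event.order_expiry_time
    ) < datetime.now(timezone.utc)
    if not order.paid_via and (override or (order.status == "pending" and past_window)):
        order.status = "expired"
    return order


if __name__ == "__main__":
    stale = Order(
        event=Event(order_expiry_time=10),
        created_at=datetime.now(timezone.utc) - timedelta(minutes=30),
    )
    print(set_expiry_for_order(stale).status)  # expired
```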
gh_patches_debug_26569
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-4239
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Recomended show IMDB Popular error
Medusa Info: | Branch: master Commit: 212cd1c8a350f2d5ca40f172ed5a227d9a5cb80f Version: v0.2.3 Database: 44.9
-- | --
Python Version: | 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:25:58) [MSC v.1500 64 bit (AMD64)]
SSL Version: | OpenSSL 1.0.2k 26 Jan 2017
OS: | Windows-10-10.0.14393
Locale: | nl_NL.cp1252

```
2018-05-21 10:48:00 WARNING Thread_24 :: [212cd1c] Could not parse show tt6845390 with error: u'year'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/show/recommendations/imdb.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import logging
6 import os
7 import posixpath
8 import re
9 from builtins import object
10
11 from imdbpie import imdbpie
12
13 from medusa import helpers
14 from medusa.cache import recommended_series_cache
15 from medusa.indexers.indexer_config import INDEXER_TVDBV2
16 from medusa.logger.adapters.style import BraceAdapter
17 from medusa.session.core import MedusaSession
18 from medusa.show.recommendations.recommended import (
19 RecommendedShow, cached_get_imdb_series_details, create_key_from_series,
20 update_recommended_series_cache_index
21 )
22
23 from requests import RequestException
24
25 from six import binary_type
26
27 log = BraceAdapter(logging.getLogger(__name__))
28 log.logger.addHandler(logging.NullHandler())
29
30 imdb_api = imdbpie.Imdb()
31
32
33 class ImdbPopular(object):
34 """Gets a list of most popular TV series from imdb."""
35
36 def __init__(self):
37 """Initialize class."""
38 self.cache_subfolder = __name__.split('.')[-1] if '.' in __name__ else __name__
39 self.session = MedusaSession()
40 self.recommender = 'IMDB Popular'
41 self.default_img_src = 'poster.png'
42
43 @recommended_series_cache.cache_on_arguments(namespace='imdb', function_key_generator=create_key_from_series)
44 def _create_recommended_show(self, series, storage_key=None):
45 """Create the RecommendedShow object from the returned showobj."""
46 tvdb_id = helpers.get_tvdb_from_id(series.get('imdb_tt'), 'IMDB')
47
48 if not tvdb_id:
49 return None
50
51 rec_show = RecommendedShow(
52 self,
53 series.get('imdb_tt'),
54 series.get('name'),
55 INDEXER_TVDBV2,
56 int(tvdb_id),
57 **{'rating': series.get('rating'),
58 'votes': series.get('votes'),
59 'image_href': series.get('imdb_url')}
60 )
61
62 if series.get('image_url'):
63 rec_show.cache_image(series.get('image_url'))
64
65 return rec_show
66
67 def fetch_popular_shows(self):
68 """Get popular show information from IMDB."""
69 popular_shows = []
70
71 imdb_result = imdb_api.get_popular_shows()
72
73 for imdb_show in imdb_result['ranks']:
74 series = {}
75 imdb_id = series['imdb_tt'] = imdb_show['id'].strip('/').split('/')[-1]
76
77 if imdb_id:
78 show_details = cached_get_imdb_series_details(imdb_id)
79 if show_details:
80 try:
81 series['year'] = imdb_show['year']
82 series['name'] = imdb_show['title']
83 series['image_url_large'] = imdb_show['image']['url']
84 series['image_path'] = posixpath.join('images', 'imdb_popular',
85 os.path.basename(series['image_url_large']))
86 series['image_url'] = '{0}{1}'.format(imdb_show['image']['url'].split('V1')[0], '_SY600_AL_.jpg')
87 series['imdb_url'] = 'http://www.imdb.com{imdb_id}'.format(imdb_id=imdb_show['id'])
88 series['votes'] = show_details['ratings'].get('ratingCount', 0)
89 series['outline'] = show_details['plot'].get('outline', {}).get('text')
90 series['rating'] = show_details['ratings'].get('rating', 0)
91 except Exception as error:
92 log.warning('Could not parse show {imdb_id} with error: {error}',
93 {'imdb_id': imdb_id, 'error': error})
94 else:
95 continue
96
97 if all([series['year'], series['name'], series['imdb_tt']]):
98 popular_shows.append(series)
99
100 result = []
101 for series in popular_shows:
102 try:
103 recommended_show = self._create_recommended_show(series, storage_key=b'imdb_{0}'.format(series['imdb_tt']))
104 if recommended_show:
105 result.append(recommended_show)
106 except RequestException:
107 log.warning(
108 u'Could not connect to indexers to check if you already have'
109 u' this show in your library: {show} ({year})',
110 {'show': series['name'], 'year': series['name']}
111 )
112
113 # Update the dogpile index. This will allow us to retrieve all stored dogpile shows from the dbm.
114 update_recommended_series_cache_index('imdb', [binary_type(s.series_id) for s in result])
115
116 return result
117
118 @staticmethod
119 def change_size(image_url, factor=3):
120 """Change the size of the image we get from IMDB.
121
122 :param: image_url: Image source URL
123 :param: factor: Multiplier for the image size
124 """
125 match = re.search(r'(.+[X|Y])(\d+)(_CR\d+,\d+,)(\d+),(\d+)', image_url)
126
127 if match:
128 matches = list(match.groups())
129 matches[1] = int(matches[1]) * factor
130 matches[3] = int(matches[3]) * factor
131 matches[4] = int(matches[4]) * factor
132
133 return '{0}{1}{2}{3},{4}_AL_.jpg'.format(matches[0], matches[1], matches[2],
134 matches[3], matches[4])
135 else:
136 return image_url
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/medusa/show/recommendations/imdb.py b/medusa/show/recommendations/imdb.py
--- a/medusa/show/recommendations/imdb.py
+++ b/medusa/show/recommendations/imdb.py
@@ -78,7 +78,7 @@
show_details = cached_get_imdb_series_details(imdb_id)
if show_details:
try:
- series['year'] = imdb_show['year']
+ series['year'] = imdb_show.get('year')
series['name'] = imdb_show['title']
series['image_url_large'] = imdb_show['image']['url']
series['image_path'] = posixpath.join('images', 'imdb_popular',
@@ -89,7 +89,7 @@
series['outline'] = show_details['plot'].get('outline', {}).get('text')
series['rating'] = show_details['ratings'].get('rating', 0)
except Exception as error:
- log.warning('Could not parse show {imdb_id} with error: {error}',
+ log.warning('Could not parse show {imdb_id} with error: {error!r}',
{'imdb_id': imdb_id, 'error': error})
else:
continue
|
{"golden_diff": "diff --git a/medusa/show/recommendations/imdb.py b/medusa/show/recommendations/imdb.py\n--- a/medusa/show/recommendations/imdb.py\n+++ b/medusa/show/recommendations/imdb.py\n@@ -78,7 +78,7 @@\n show_details = cached_get_imdb_series_details(imdb_id)\n if show_details:\n try:\n- series['year'] = imdb_show['year']\n+ series['year'] = imdb_show.get('year')\n series['name'] = imdb_show['title']\n series['image_url_large'] = imdb_show['image']['url']\n series['image_path'] = posixpath.join('images', 'imdb_popular',\n@@ -89,7 +89,7 @@\n series['outline'] = show_details['plot'].get('outline', {}).get('text')\n series['rating'] = show_details['ratings'].get('rating', 0)\n except Exception as error:\n- log.warning('Could not parse show {imdb_id} with error: {error}',\n+ log.warning('Could not parse show {imdb_id} with error: {error!r}',\n {'imdb_id': imdb_id, 'error': error})\n else:\n continue\n", "issue": "Add Recomended show IMDB Popular error\nMedusa Info: | Branch: master Commit: 212cd1c8a350f2d5ca40f172ed5a227d9a5cb80f Version: v0.2.3 Database: 44.9\r\n-- | --\r\nPython Version: | 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:25:58) [MSC v.1500 64 bit (AMD64)]\r\nSSL Version: | OpenSSL 1.0.2k 26 Jan 2017\r\nOS: | Windows-10-10.0.14393\r\nLocale: | nl_NL.cp1252\r\n\r\n\r\n\r\n```\r\n2018-05-21 10:48:00 WARNING Thread_24 :: [212cd1c] Could not parse show tt6845390 with error: u'year'\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport posixpath\nimport re\nfrom builtins import object\n\nfrom imdbpie import imdbpie\n\nfrom medusa import helpers\nfrom medusa.cache import recommended_series_cache\nfrom medusa.indexers.indexer_config import INDEXER_TVDBV2\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.session.core import MedusaSession\nfrom medusa.show.recommendations.recommended import (\n RecommendedShow, cached_get_imdb_series_details, create_key_from_series,\n update_recommended_series_cache_index\n)\n\nfrom requests import RequestException\n\nfrom six import binary_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\nimdb_api = imdbpie.Imdb()\n\n\nclass ImdbPopular(object):\n \"\"\"Gets a list of most popular TV series from imdb.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize class.\"\"\"\n self.cache_subfolder = __name__.split('.')[-1] if '.' 
in __name__ else __name__\n self.session = MedusaSession()\n self.recommender = 'IMDB Popular'\n self.default_img_src = 'poster.png'\n\n @recommended_series_cache.cache_on_arguments(namespace='imdb', function_key_generator=create_key_from_series)\n def _create_recommended_show(self, series, storage_key=None):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n tvdb_id = helpers.get_tvdb_from_id(series.get('imdb_tt'), 'IMDB')\n\n if not tvdb_id:\n return None\n\n rec_show = RecommendedShow(\n self,\n series.get('imdb_tt'),\n series.get('name'),\n INDEXER_TVDBV2,\n int(tvdb_id),\n **{'rating': series.get('rating'),\n 'votes': series.get('votes'),\n 'image_href': series.get('imdb_url')}\n )\n\n if series.get('image_url'):\n rec_show.cache_image(series.get('image_url'))\n\n return rec_show\n\n def fetch_popular_shows(self):\n \"\"\"Get popular show information from IMDB.\"\"\"\n popular_shows = []\n\n imdb_result = imdb_api.get_popular_shows()\n\n for imdb_show in imdb_result['ranks']:\n series = {}\n imdb_id = series['imdb_tt'] = imdb_show['id'].strip('/').split('/')[-1]\n\n if imdb_id:\n show_details = cached_get_imdb_series_details(imdb_id)\n if show_details:\n try:\n series['year'] = imdb_show['year']\n series['name'] = imdb_show['title']\n series['image_url_large'] = imdb_show['image']['url']\n series['image_path'] = posixpath.join('images', 'imdb_popular',\n os.path.basename(series['image_url_large']))\n series['image_url'] = '{0}{1}'.format(imdb_show['image']['url'].split('V1')[0], '_SY600_AL_.jpg')\n series['imdb_url'] = 'http://www.imdb.com{imdb_id}'.format(imdb_id=imdb_show['id'])\n series['votes'] = show_details['ratings'].get('ratingCount', 0)\n series['outline'] = show_details['plot'].get('outline', {}).get('text')\n series['rating'] = show_details['ratings'].get('rating', 0)\n except Exception as error:\n log.warning('Could not parse show {imdb_id} with error: {error}',\n {'imdb_id': imdb_id, 'error': error})\n else:\n continue\n\n if all([series['year'], series['name'], series['imdb_tt']]):\n popular_shows.append(series)\n\n result = []\n for series in popular_shows:\n try:\n recommended_show = self._create_recommended_show(series, storage_key=b'imdb_{0}'.format(series['imdb_tt']))\n if recommended_show:\n result.append(recommended_show)\n except RequestException:\n log.warning(\n u'Could not connect to indexers to check if you already have'\n u' this show in your library: {show} ({year})',\n {'show': series['name'], 'year': series['name']}\n )\n\n # Update the dogpile index. 
This will allow us to retrieve all stored dogpile shows from the dbm.\n update_recommended_series_cache_index('imdb', [binary_type(s.series_id) for s in result])\n\n return result\n\n @staticmethod\n def change_size(image_url, factor=3):\n \"\"\"Change the size of the image we get from IMDB.\n\n :param: image_url: Image source URL\n :param: factor: Multiplier for the image size\n \"\"\"\n match = re.search(r'(.+[X|Y])(\\d+)(_CR\\d+,\\d+,)(\\d+),(\\d+)', image_url)\n\n if match:\n matches = list(match.groups())\n matches[1] = int(matches[1]) * factor\n matches[3] = int(matches[3]) * factor\n matches[4] = int(matches[4]) * factor\n\n return '{0}{1}{2}{3},{4}_AL_.jpg'.format(matches[0], matches[1], matches[2],\n matches[3], matches[4])\n else:\n return image_url\n", "path": "medusa/show/recommendations/imdb.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport posixpath\nimport re\nfrom builtins import object\n\nfrom imdbpie import imdbpie\n\nfrom medusa import helpers\nfrom medusa.cache import recommended_series_cache\nfrom medusa.indexers.indexer_config import INDEXER_TVDBV2\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.session.core import MedusaSession\nfrom medusa.show.recommendations.recommended import (\n RecommendedShow, cached_get_imdb_series_details, create_key_from_series,\n update_recommended_series_cache_index\n)\n\nfrom requests import RequestException\n\nfrom six import binary_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\nimdb_api = imdbpie.Imdb()\n\n\nclass ImdbPopular(object):\n \"\"\"Gets a list of most popular TV series from imdb.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize class.\"\"\"\n self.cache_subfolder = __name__.split('.')[-1] if '.' 
in __name__ else __name__\n self.session = MedusaSession()\n self.recommender = 'IMDB Popular'\n self.default_img_src = 'poster.png'\n\n @recommended_series_cache.cache_on_arguments(namespace='imdb', function_key_generator=create_key_from_series)\n def _create_recommended_show(self, series, storage_key=None):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n tvdb_id = helpers.get_tvdb_from_id(series.get('imdb_tt'), 'IMDB')\n\n if not tvdb_id:\n return None\n\n rec_show = RecommendedShow(\n self,\n series.get('imdb_tt'),\n series.get('name'),\n INDEXER_TVDBV2,\n int(tvdb_id),\n **{'rating': series.get('rating'),\n 'votes': series.get('votes'),\n 'image_href': series.get('imdb_url')}\n )\n\n if series.get('image_url'):\n rec_show.cache_image(series.get('image_url'))\n\n return rec_show\n\n def fetch_popular_shows(self):\n \"\"\"Get popular show information from IMDB.\"\"\"\n popular_shows = []\n\n imdb_result = imdb_api.get_popular_shows()\n\n for imdb_show in imdb_result['ranks']:\n series = {}\n imdb_id = series['imdb_tt'] = imdb_show['id'].strip('/').split('/')[-1]\n\n if imdb_id:\n show_details = cached_get_imdb_series_details(imdb_id)\n if show_details:\n try:\n series['year'] = imdb_show.get('year')\n series['name'] = imdb_show['title']\n series['image_url_large'] = imdb_show['image']['url']\n series['image_path'] = posixpath.join('images', 'imdb_popular',\n os.path.basename(series['image_url_large']))\n series['image_url'] = '{0}{1}'.format(imdb_show['image']['url'].split('V1')[0], '_SY600_AL_.jpg')\n series['imdb_url'] = 'http://www.imdb.com{imdb_id}'.format(imdb_id=imdb_show['id'])\n series['votes'] = show_details['ratings'].get('ratingCount', 0)\n series['outline'] = show_details['plot'].get('outline', {}).get('text')\n series['rating'] = show_details['ratings'].get('rating', 0)\n except Exception as error:\n log.warning('Could not parse show {imdb_id} with error: {error!r}',\n {'imdb_id': imdb_id, 'error': error})\n else:\n continue\n\n if all([series['year'], series['name'], series['imdb_tt']]):\n popular_shows.append(series)\n\n result = []\n for series in popular_shows:\n try:\n recommended_show = self._create_recommended_show(series, storage_key=b'imdb_{0}'.format(series['imdb_tt']))\n if recommended_show:\n result.append(recommended_show)\n except RequestException:\n log.warning(\n u'Could not connect to indexers to check if you already have'\n u' this show in your library: {show} ({year})',\n {'show': series['name'], 'year': series['name']}\n )\n\n # Update the dogpile index. This will allow us to retrieve all stored dogpile shows from the dbm.\n update_recommended_series_cache_index('imdb', [binary_type(s.series_id) for s in result])\n\n return result\n\n @staticmethod\n def change_size(image_url, factor=3):\n \"\"\"Change the size of the image we get from IMDB.\n\n :param: image_url: Image source URL\n :param: factor: Multiplier for the image size\n \"\"\"\n match = re.search(r'(.+[X|Y])(\\d+)(_CR\\d+,\\d+,)(\\d+),(\\d+)', image_url)\n\n if match:\n matches = list(match.groups())\n matches[1] = int(matches[1]) * factor\n matches[3] = int(matches[3]) * factor\n matches[4] = int(matches[4]) * factor\n\n return '{0}{1}{2}{3},{4}_AL_.jpg'.format(matches[0], matches[1], matches[2],\n matches[3], matches[4])\n else:\n return image_url\n", "path": "medusa/show/recommendations/imdb.py"}]}
| 2,054 | 274 |
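The `u'year'` in the warning above is the string form of a `KeyError`: some entries in IMDB's popular-shows payload carry no `year` key, so the subscript lookup fails before the remaining fields are parsed, which is what the switch to `imdb_show.get('year')` addresses. A short, self-contained illustration with made-up payloads:

```python
# Hypothetical IMDB-style payloads; the second entry is missing "year",
# which is exactly what raises KeyError: u'year' with a plain subscript lookup.
shows = [
    {"id": "/title/tt0944947/", "title": "Game of Thrones", "year": 2011},
    {"id": "/title/tt6845390/", "title": "Untitled Project"},  # no "year" key
]

parsed = []
for show in shows:
    parsed.append(
        {
            "imdb_tt": show["id"].strip("/").split("/")[-1],
            "name": show["title"],
            # dict.get() returns None instead of raising when the key is absent,
            # mirroring the imdb_show.get('year') change in the diff above.
            "year": show.get("year"),
        }
    )

print(parsed[1])  # {'imdb_tt': 'tt6845390', 'name': 'Untitled Project', 'year': None}
```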
gh_patches_debug_18443
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-3398
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add missing ASDF schemas for new coordinate frames in 1.1
Whoops
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/io/special/asdf/tags/coordinates/frames.py`
Content:
```
1 import os
2 import glob
3
4 from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType
5
6 import sunpy.coordinates
7
8 from ...types import SunPyType
9
10 __all__ = ['SunPyCoordType']
11
12
13 SCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
14 '..', '..',
15 'schemas',
16 'sunpy.org',
17 'sunpy'))
18
19
20 def _get_frames():
21 """
22 By reading the schema files, get the list of all the frames we can
23 save/load.
24 """
25 search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
26 files = glob.glob(search)
27
28 names = []
29 for fpath in files:
30 path, fname = os.path.split(fpath)
31 frame, _ = fname.split('-')
32 exclude_schemas = []
33 if frame not in exclude_schemas:
34 names.append(frame)
35
36 return names
37
38
39 class SunPyCoordType(BaseCoordType, SunPyType):
40 _tag_prefix = "coordinates/frames/"
41 name = ["coordinates/frames/" + f for f in _get_frames()]
42 types = [
43 sunpy.coordinates.HeliographicCarrington,
44 sunpy.coordinates.HeliographicStonyhurst,
45 sunpy.coordinates.Heliocentric,
46 sunpy.coordinates.Helioprojective,
47 ]
48 requires = ['sunpy', 'astropy>=3.1']
49 version = "1.0.0"
50
51 @classmethod
52 def assert_equal(cls, old, new):
53 assert isinstance(new, type(old))
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/io/special/asdf/tags/coordinates/frames.py b/sunpy/io/special/asdf/tags/coordinates/frames.py
--- a/sunpy/io/special/asdf/tags/coordinates/frames.py
+++ b/sunpy/io/special/asdf/tags/coordinates/frames.py
@@ -3,7 +3,9 @@
from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType
-import sunpy.coordinates
+from sunpy.coordinates import frames
+
+sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))
from ...types import SunPyType
@@ -39,12 +41,7 @@
class SunPyCoordType(BaseCoordType, SunPyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
- types = [
- sunpy.coordinates.HeliographicCarrington,
- sunpy.coordinates.HeliographicStonyhurst,
- sunpy.coordinates.Heliocentric,
- sunpy.coordinates.Helioprojective,
- ]
+ types = sunpy_frames
requires = ['sunpy', 'astropy>=3.1']
version = "1.0.0"
|
{"golden_diff": "diff --git a/sunpy/io/special/asdf/tags/coordinates/frames.py b/sunpy/io/special/asdf/tags/coordinates/frames.py\n--- a/sunpy/io/special/asdf/tags/coordinates/frames.py\n+++ b/sunpy/io/special/asdf/tags/coordinates/frames.py\n@@ -3,7 +3,9 @@\n \n from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType\n \n-import sunpy.coordinates\n+from sunpy.coordinates import frames\n+\n+sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))\n \n from ...types import SunPyType\n \n@@ -39,12 +41,7 @@\n class SunPyCoordType(BaseCoordType, SunPyType):\n _tag_prefix = \"coordinates/frames/\"\n name = [\"coordinates/frames/\" + f for f in _get_frames()]\n- types = [\n- sunpy.coordinates.HeliographicCarrington,\n- sunpy.coordinates.HeliographicStonyhurst,\n- sunpy.coordinates.Heliocentric,\n- sunpy.coordinates.Helioprojective,\n- ]\n+ types = sunpy_frames\n requires = ['sunpy', 'astropy>=3.1']\n version = \"1.0.0\"\n", "issue": "Add missing ASDF schemas for new coordinate frames in 1.1\nWhoops\n", "before_files": [{"content": "import os\nimport glob\n\nfrom astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType\n\nimport sunpy.coordinates\n\nfrom ...types import SunPyType\n\n__all__ = ['SunPyCoordType']\n\n\nSCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', '..',\n 'schemas',\n 'sunpy.org',\n 'sunpy'))\n\n\ndef _get_frames():\n \"\"\"\n By reading the schema files, get the list of all the frames we can\n save/load.\n \"\"\"\n search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')\n files = glob.glob(search)\n\n names = []\n for fpath in files:\n path, fname = os.path.split(fpath)\n frame, _ = fname.split('-')\n exclude_schemas = []\n if frame not in exclude_schemas:\n names.append(frame)\n\n return names\n\n\nclass SunPyCoordType(BaseCoordType, SunPyType):\n _tag_prefix = \"coordinates/frames/\"\n name = [\"coordinates/frames/\" + f for f in _get_frames()]\n types = [\n sunpy.coordinates.HeliographicCarrington,\n sunpy.coordinates.HeliographicStonyhurst,\n sunpy.coordinates.Heliocentric,\n sunpy.coordinates.Helioprojective,\n ]\n requires = ['sunpy', 'astropy>=3.1']\n version = \"1.0.0\"\n\n @classmethod\n def assert_equal(cls, old, new):\n assert isinstance(new, type(old))\n", "path": "sunpy/io/special/asdf/tags/coordinates/frames.py"}], "after_files": [{"content": "import os\nimport glob\n\nfrom astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType\n\nfrom sunpy.coordinates import frames\n\nsunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))\n\nfrom ...types import SunPyType\n\n__all__ = ['SunPyCoordType']\n\n\nSCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', '..',\n 'schemas',\n 'sunpy.org',\n 'sunpy'))\n\n\ndef _get_frames():\n \"\"\"\n By reading the schema files, get the list of all the frames we can\n save/load.\n \"\"\"\n search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')\n files = glob.glob(search)\n\n names = []\n for fpath in files:\n path, fname = os.path.split(fpath)\n frame, _ = fname.split('-')\n exclude_schemas = []\n if frame not in exclude_schemas:\n names.append(frame)\n\n return names\n\n\nclass SunPyCoordType(BaseCoordType, SunPyType):\n _tag_prefix = \"coordinates/frames/\"\n name = [\"coordinates/frames/\" + f for f in _get_frames()]\n types = sunpy_frames\n requires = ['sunpy', 'astropy>=3.1']\n version = \"1.0.0\"\n\n @classmethod\n def assert_equal(cls, old, new):\n assert isinstance(new, 
type(old))\n", "path": "sunpy/io/special/asdf/tags/coordinates/frames.py"}]}
| 721 | 274 |
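The sunpy diff above swaps a hard-coded frame list for one derived from `frames.__all__`, so frames added later (the 1.1 coordinate frames the issue refers to) are picked up automatically. The sketch below shows the same pattern on a throwaway module so it runs without sunpy installed; the module and class names are only placeholders.

```python
# Demonstrates deriving a type list from a module's __all__ instead of hard-coding it.
# The module and classes here are placeholders standing in for sunpy.coordinates.frames.
import types

frames = types.ModuleType("frames")


class HeliographicStonyhurst:
    pass


class HeliographicCarrington:
    pass


class HeliocentricInertial:  # a newer frame that a hard-coded list would silently miss
    pass


for cls in (HeliographicStonyhurst, HeliographicCarrington, HeliocentricInertial):
    setattr(frames, cls.__name__, cls)
frames.__all__ = [
    "HeliographicStonyhurst",
    "HeliographicCarrington",
    "HeliocentricInertial",
]

# Same idea as: sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))
sunpy_frames = [getattr(frames, name) for name in frames.__all__]
print(sunpy_frames)
```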
gh_patches_debug_16356
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-2300
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ExcessStatistics class throws an error for `time_step` with an astropy unit
### Bug description
The current implementation of the `ExcessStatistics` class doesn't allow astropy units for the parameter `time_step`. See example below.
### Expected outcome
Ideally, `total_time_above_threshold`, `average_times` and `rms_times` should have the same units as `time_step`.
### Minimal complete verifiable example
```Python
from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics
import astropy.units as u
signal = [0, 0, 2, 2, 0, 4]
thresholds = 1
time_step = 1 * u.s
excess_statistics = ExcessStatistics(signal, thresholds, time_step)
```
### Package versions
v2023.5.1
### Additional context
This is also relevant for PR #2275. One could also add a check whether time units are used for `time_step` or would this be to cumbersome?
Since I implemented the `ExcessStatistics` class I would be happy to be assigned to this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/analysis/time_series/excess_statistics.py`
Content:
```
1 """
2 Functionality to calculate excess statistics of time series.
3
4 .. attention::
5
6 |expect-api-changes|
7 """
8
9 __all__ = ["ExcessStatistics"]
10
11
12 import numbers
13 import numpy as np
14
15 from collections.abc import Iterable
16
17
18 class ExcessStatistics:
19 """
20 Calculate total time, number of upwards crossings, average time and
21 root-mean-square time above given thresholds of a sequence.
22
23 Parameters
24 ----------
25 signal : 1D |array_like|
26 Signal to be analyzed.
27
28 thresholds : 1D |array_like|
29 Threshold values.
30
31 time_step : int
32 Time step of ``signal``.
33
34 Raises
35 ------
36 `ValueError`
37 If ``time_step`` ≤ 0.
38
39 Example
40 -------
41 >>> from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics
42 >>> signal = [0, 0, 2, 2, 0, 4]
43 >>> thresholds = [1, 3, 5]
44 >>> time_step = 1
45 >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)
46 >>> excess_statistics.total_time_above_threshold
47 [3, 1, 0]
48 >>> excess_statistics.number_of_crossings
49 [2, 1, 0]
50 >>> excess_statistics.average_times
51 [1.5, 1.0, 0]
52 >>> excess_statistics.rms_times
53 [0.5, 0.0, 0]
54 """
55
56 def __init__(self, signal, thresholds, time_step):
57 if time_step <= 0:
58 raise ValueError("time_step must be positive")
59
60 # make sure thresholds is an iterable
61 if not isinstance(thresholds, Iterable):
62 thresholds = [thresholds]
63
64 self._total_time_above_threshold = []
65 self._number_of_crossings = []
66 self._average_times = []
67 self._rms_times = []
68 self.events_per_threshold = {}
69
70 self._calculate_excess_statistics(signal, thresholds, time_step)
71
72 def _calculate_excess_statistics(self, signal, thresholds, time_step):
73 for threshold in thresholds:
74 indices_above_threshold = np.where(np.array(signal) > threshold)[0]
75
76 if len(indices_above_threshold) == 0:
77 self._times_above_threshold = []
78 self._total_time_above_threshold.append(0)
79 self._number_of_crossings.append(0)
80 self._average_times.append(0)
81 self._rms_times.append(0)
82
83 else:
84 self._total_time_above_threshold.append(
85 time_step * len(indices_above_threshold)
86 )
87
88 distances_to_next_index = (
89 indices_above_threshold[1:] - indices_above_threshold[:-1]
90 )
91 split_indices = np.where(distances_to_next_index != 1)[0]
92 event_lengths = np.split(distances_to_next_index, split_indices)
93
94 # set correct length for first event
95 event_lengths[0] = np.append(event_lengths[0], 1)
96
97 self._times_above_threshold = [
98 time_step * len(event_lengths[i]) for i in range(len(event_lengths))
99 ]
100
101 self._number_of_crossings.append(len(event_lengths))
102 if indices_above_threshold[0] == 0:
103 # Don't count the first event if there is no crossing.
104 self._number_of_crossings[-1] -= 1
105
106 self._average_times.append(np.mean(self._times_above_threshold))
107 self._rms_times.append(np.std(self._times_above_threshold))
108
109 self.events_per_threshold.update({threshold: self._times_above_threshold})
110
111 def hist(self, bins=32):
112 """
113 Computes the probability density function of the time above each value
114 in ``thresholds``.
115
116 Parameters
117 ----------
118 bins : int, default: 32
119 The number of bins in the estimation of the PDF above ``thresholds``.
120
121 Returns
122 -------
123 hist: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )
124 For each value in ``thresholds``, returns the estimated PDF of time
125 above threshold.
126
127 bin_centers: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )
128 Bin centers for ``hist``.
129
130 Raises
131 ------
132 `TypeError`
133 If ``bins`` is not a positive integer.
134
135 Examples
136 --------
137 >>> from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics
138 >>> signal = [0, 0, 2, 0, 4]
139 >>> thresholds = [1, 3, 5]
140 >>> time_step = 1
141 >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)
142 >>> excess_statistics.hist(2)
143 (array([[0., 2.],
144 [0., 2.],
145 [0., 0.]]), array([[0.75, 1.25],
146 [0.75, 1.25],
147 [0. , 0. ]]))
148 """
149
150 if not isinstance(bins, numbers.Integral):
151 raise TypeError("bins must be an integer")
152
153 hist = np.zeros((len(self.events_per_threshold), bins))
154 bin_centers = np.zeros((len(self.events_per_threshold), bins))
155
156 for i, threshold in enumerate(self.events_per_threshold.keys()):
157 if len(self.events_per_threshold[threshold]) >= 1:
158 hist[i, :], bin_edges = np.histogram(
159 self.events_per_threshold[threshold], bins=bins, density=True
160 )
161 bin_centers[i, :] = (bin_edges[1:] + bin_edges[:-1]) / 2
162 return hist, bin_centers
163
164 @property
165 def total_time_above_threshold(self):
166 """
167 Total time above threshold(s).
168
169 Returns
170 -------
171 total_time_above_threshold: 1D |array_like|
172 Total time above threshold for each value in ``thresholds``.
173 """
174
175 return self._total_time_above_threshold
176
177 @property
178 def number_of_crossings(self):
179 """
180 Total number of upwards crossings for threshold(s).
181
182 Returns
183 -------
184 number_of_crossings: 1D |array_like|
185 Total number of upwards crossings for each value in ``thresholds``.
186 """
187
188 return self._number_of_crossings
189
190 @property
191 def average_times(self):
192 """
193 Average time above threshold(s).
194
195 Returns
196 -------
197 average_times: 1D |array_like|
198 Average time above each value in ``thresholds``.
199 """
200
201 return self._average_times
202
203 @property
204 def rms_times(self):
205 """
206 Root-mean-square values of time above threshold(s).
207
208 Returns
209 -------
210 rms_times: 1D |array_like|
211 Root-mean-square values of time above each value in ``thresholds``.
212 """
213
214 return self._rms_times
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/analysis/time_series/excess_statistics.py b/plasmapy/analysis/time_series/excess_statistics.py
--- a/plasmapy/analysis/time_series/excess_statistics.py
+++ b/plasmapy/analysis/time_series/excess_statistics.py
@@ -9,6 +9,7 @@
__all__ = ["ExcessStatistics"]
+import astropy.units as u
import numbers
import numpy as np
@@ -98,6 +99,9 @@
time_step * len(event_lengths[i]) for i in range(len(event_lengths))
]
+ if isinstance(time_step, u.Quantity):
+ self._times_above_threshold *= time_step.unit
+
self._number_of_crossings.append(len(event_lengths))
if indices_above_threshold[0] == 0:
# Don't count the first event if there is no crossing.
|
{"golden_diff": "diff --git a/plasmapy/analysis/time_series/excess_statistics.py b/plasmapy/analysis/time_series/excess_statistics.py\n--- a/plasmapy/analysis/time_series/excess_statistics.py\n+++ b/plasmapy/analysis/time_series/excess_statistics.py\n@@ -9,6 +9,7 @@\n __all__ = [\"ExcessStatistics\"]\n \n \n+import astropy.units as u\n import numbers\n import numpy as np\n \n@@ -98,6 +99,9 @@\n time_step * len(event_lengths[i]) for i in range(len(event_lengths))\n ]\n \n+ if isinstance(time_step, u.Quantity):\n+ self._times_above_threshold *= time_step.unit\n+\n self._number_of_crossings.append(len(event_lengths))\n if indices_above_threshold[0] == 0:\n # Don't count the first event if there is no crossing.\n", "issue": "ExcessStatistics class throws an error for `time_step` with an astropy unit \n### Bug description\n\nThe current implementation of the `ExcessStatistics` class doesn't allow astropy units for the parameter `time_step`. See example below.\n\n### Expected outcome\n\nIdeally, `total_time_above_threshold`, `average_times` and `rms_times` should have the same units as `time_step`.\n\n### Minimal complete verifiable example\n\n```Python\nfrom plasmapy.analysis.time_series.excess_statistics import ExcessStatistics\r\nimport astropy.units as u\r\n\r\nsignal = [0, 0, 2, 2, 0, 4]\r\nthresholds = 1\r\ntime_step = 1 * u.s\r\n\r\nexcess_statistics = ExcessStatistics(signal, thresholds, time_step)\n```\n\n\n### Package versions\n\nv2023.5.1\n\n### Additional context\n\nThis is also relevant for PR #2275. One could also add a check whether time units are used for `time_step` or would this be to cumbersome? \r\n\r\nSince I implemented the `ExcessStatistics` class I would be happy to be assigned to this issue.\r\n\n", "before_files": [{"content": "\"\"\"\nFunctionality to calculate excess statistics of time series.\n\n.. 
attention::\n\n |expect-api-changes|\n\"\"\"\n\n__all__ = [\"ExcessStatistics\"]\n\n\nimport numbers\nimport numpy as np\n\nfrom collections.abc import Iterable\n\n\nclass ExcessStatistics:\n \"\"\"\n Calculate total time, number of upwards crossings, average time and\n root-mean-square time above given thresholds of a sequence.\n\n Parameters\n ----------\n signal : 1D |array_like|\n Signal to be analyzed.\n\n thresholds : 1D |array_like|\n Threshold values.\n\n time_step : int\n Time step of ``signal``.\n\n Raises\n ------\n `ValueError`\n If ``time_step`` \u2264 0.\n\n Example\n -------\n >>> from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics\n >>> signal = [0, 0, 2, 2, 0, 4]\n >>> thresholds = [1, 3, 5]\n >>> time_step = 1\n >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)\n >>> excess_statistics.total_time_above_threshold\n [3, 1, 0]\n >>> excess_statistics.number_of_crossings\n [2, 1, 0]\n >>> excess_statistics.average_times\n [1.5, 1.0, 0]\n >>> excess_statistics.rms_times\n [0.5, 0.0, 0]\n \"\"\"\n\n def __init__(self, signal, thresholds, time_step):\n if time_step <= 0:\n raise ValueError(\"time_step must be positive\")\n\n # make sure thresholds is an iterable\n if not isinstance(thresholds, Iterable):\n thresholds = [thresholds]\n\n self._total_time_above_threshold = []\n self._number_of_crossings = []\n self._average_times = []\n self._rms_times = []\n self.events_per_threshold = {}\n\n self._calculate_excess_statistics(signal, thresholds, time_step)\n\n def _calculate_excess_statistics(self, signal, thresholds, time_step):\n for threshold in thresholds:\n indices_above_threshold = np.where(np.array(signal) > threshold)[0]\n\n if len(indices_above_threshold) == 0:\n self._times_above_threshold = []\n self._total_time_above_threshold.append(0)\n self._number_of_crossings.append(0)\n self._average_times.append(0)\n self._rms_times.append(0)\n\n else:\n self._total_time_above_threshold.append(\n time_step * len(indices_above_threshold)\n )\n\n distances_to_next_index = (\n indices_above_threshold[1:] - indices_above_threshold[:-1]\n )\n split_indices = np.where(distances_to_next_index != 1)[0]\n event_lengths = np.split(distances_to_next_index, split_indices)\n\n # set correct length for first event\n event_lengths[0] = np.append(event_lengths[0], 1)\n\n self._times_above_threshold = [\n time_step * len(event_lengths[i]) for i in range(len(event_lengths))\n ]\n\n self._number_of_crossings.append(len(event_lengths))\n if indices_above_threshold[0] == 0:\n # Don't count the first event if there is no crossing.\n self._number_of_crossings[-1] -= 1\n\n self._average_times.append(np.mean(self._times_above_threshold))\n self._rms_times.append(np.std(self._times_above_threshold))\n\n self.events_per_threshold.update({threshold: self._times_above_threshold})\n\n def hist(self, bins=32):\n \"\"\"\n Computes the probability density function of the time above each value\n in ``thresholds``.\n\n Parameters\n ----------\n bins : int, default: 32\n The number of bins in the estimation of the PDF above ``thresholds``.\n\n Returns\n -------\n hist: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )\n For each value in ``thresholds``, returns the estimated PDF of time\n above threshold.\n\n bin_centers: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )\n Bin centers for ``hist``.\n\n Raises\n ------\n `TypeError`\n If ``bins`` is not a positive integer.\n\n Examples\n --------\n >>> from 
plasmapy.analysis.time_series.excess_statistics import ExcessStatistics\n >>> signal = [0, 0, 2, 0, 4]\n >>> thresholds = [1, 3, 5]\n >>> time_step = 1\n >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)\n >>> excess_statistics.hist(2)\n (array([[0., 2.],\n [0., 2.],\n [0., 0.]]), array([[0.75, 1.25],\n [0.75, 1.25],\n [0. , 0. ]]))\n \"\"\"\n\n if not isinstance(bins, numbers.Integral):\n raise TypeError(\"bins must be an integer\")\n\n hist = np.zeros((len(self.events_per_threshold), bins))\n bin_centers = np.zeros((len(self.events_per_threshold), bins))\n\n for i, threshold in enumerate(self.events_per_threshold.keys()):\n if len(self.events_per_threshold[threshold]) >= 1:\n hist[i, :], bin_edges = np.histogram(\n self.events_per_threshold[threshold], bins=bins, density=True\n )\n bin_centers[i, :] = (bin_edges[1:] + bin_edges[:-1]) / 2\n return hist, bin_centers\n\n @property\n def total_time_above_threshold(self):\n \"\"\"\n Total time above threshold(s).\n\n Returns\n -------\n total_time_above_threshold: 1D |array_like|\n Total time above threshold for each value in ``thresholds``.\n \"\"\"\n\n return self._total_time_above_threshold\n\n @property\n def number_of_crossings(self):\n \"\"\"\n Total number of upwards crossings for threshold(s).\n\n Returns\n -------\n number_of_crossings: 1D |array_like|\n Total number of upwards crossings for each value in ``thresholds``.\n \"\"\"\n\n return self._number_of_crossings\n\n @property\n def average_times(self):\n \"\"\"\n Average time above threshold(s).\n\n Returns\n -------\n average_times: 1D |array_like|\n Average time above each value in ``thresholds``.\n \"\"\"\n\n return self._average_times\n\n @property\n def rms_times(self):\n \"\"\"\n Root-mean-square values of time above threshold(s).\n\n Returns\n -------\n rms_times: 1D |array_like|\n Root-mean-square values of time above each value in ``thresholds``.\n \"\"\"\n\n return self._rms_times\n", "path": "plasmapy/analysis/time_series/excess_statistics.py"}], "after_files": [{"content": "\"\"\"\nFunctionality to calculate excess statistics of time series.\n\n.. 
attention::\n\n |expect-api-changes|\n\"\"\"\n\n__all__ = [\"ExcessStatistics\"]\n\n\nimport astropy.units as u\nimport numbers\nimport numpy as np\n\nfrom collections.abc import Iterable\n\n\nclass ExcessStatistics:\n \"\"\"\n Calculate total time, number of upwards crossings, average time and\n root-mean-square time above given thresholds of a sequence.\n\n Parameters\n ----------\n signal : 1D |array_like|\n Signal to be analyzed.\n\n thresholds : 1D |array_like|\n Threshold values.\n\n time_step : int\n Time step of ``signal``.\n\n Raises\n ------\n `ValueError`\n If ``time_step`` \u2264 0.\n\n Example\n -------\n >>> from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics\n >>> signal = [0, 0, 2, 2, 0, 4]\n >>> thresholds = [1, 3, 5]\n >>> time_step = 1\n >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)\n >>> excess_statistics.total_time_above_threshold\n [3, 1, 0]\n >>> excess_statistics.number_of_crossings\n [2, 1, 0]\n >>> excess_statistics.average_times\n [1.5, 1.0, 0]\n >>> excess_statistics.rms_times\n [0.5, 0.0, 0]\n \"\"\"\n\n def __init__(self, signal, thresholds, time_step):\n if time_step <= 0:\n raise ValueError(\"time_step must be positive\")\n\n # make sure thresholds is an iterable\n if not isinstance(thresholds, Iterable):\n thresholds = [thresholds]\n\n self._total_time_above_threshold = []\n self._number_of_crossings = []\n self._average_times = []\n self._rms_times = []\n self.events_per_threshold = {}\n\n self._calculate_excess_statistics(signal, thresholds, time_step)\n\n def _calculate_excess_statistics(self, signal, thresholds, time_step):\n for threshold in thresholds:\n indices_above_threshold = np.where(np.array(signal) > threshold)[0]\n\n if len(indices_above_threshold) == 0:\n self._times_above_threshold = []\n self._total_time_above_threshold.append(0)\n self._number_of_crossings.append(0)\n self._average_times.append(0)\n self._rms_times.append(0)\n\n else:\n self._total_time_above_threshold.append(\n time_step * len(indices_above_threshold)\n )\n\n distances_to_next_index = (\n indices_above_threshold[1:] - indices_above_threshold[:-1]\n )\n split_indices = np.where(distances_to_next_index != 1)[0]\n event_lengths = np.split(distances_to_next_index, split_indices)\n\n # set correct length for first event\n event_lengths[0] = np.append(event_lengths[0], 1)\n\n self._times_above_threshold = [\n time_step * len(event_lengths[i]) for i in range(len(event_lengths))\n ]\n\n if isinstance(time_step, u.Quantity):\n self._times_above_threshold *= time_step.unit\n\n self._number_of_crossings.append(len(event_lengths))\n if indices_above_threshold[0] == 0:\n # Don't count the first event if there is no crossing.\n self._number_of_crossings[-1] -= 1\n\n self._average_times.append(np.mean(self._times_above_threshold))\n self._rms_times.append(np.std(self._times_above_threshold))\n\n self.events_per_threshold.update({threshold: self._times_above_threshold})\n\n def hist(self, bins=32):\n \"\"\"\n Computes the probability density function of the time above each value\n in ``thresholds``.\n\n Parameters\n ----------\n bins : int, default: 32\n The number of bins in the estimation of the PDF above ``thresholds``.\n\n Returns\n -------\n hist: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )\n For each value in ``thresholds``, returns the estimated PDF of time\n above threshold.\n\n bin_centers: 2D `~numpy.ndarray`, shape (``thresholds.size``, ``bins`` )\n Bin centers for ``hist``.\n\n Raises\n ------\n `TypeError`\n 
If ``bins`` is not a positive integer.\n\n Examples\n --------\n >>> from plasmapy.analysis.time_series.excess_statistics import ExcessStatistics\n >>> signal = [0, 0, 2, 0, 4]\n >>> thresholds = [1, 3, 5]\n >>> time_step = 1\n >>> excess_statistics = ExcessStatistics(signal, thresholds, time_step)\n >>> excess_statistics.hist(2)\n (array([[0., 2.],\n [0., 2.],\n [0., 0.]]), array([[0.75, 1.25],\n [0.75, 1.25],\n [0. , 0. ]]))\n \"\"\"\n\n if not isinstance(bins, numbers.Integral):\n raise TypeError(\"bins must be an integer\")\n\n hist = np.zeros((len(self.events_per_threshold), bins))\n bin_centers = np.zeros((len(self.events_per_threshold), bins))\n\n for i, threshold in enumerate(self.events_per_threshold.keys()):\n if len(self.events_per_threshold[threshold]) >= 1:\n hist[i, :], bin_edges = np.histogram(\n self.events_per_threshold[threshold], bins=bins, density=True\n )\n bin_centers[i, :] = (bin_edges[1:] + bin_edges[:-1]) / 2\n return hist, bin_centers\n\n @property\n def total_time_above_threshold(self):\n \"\"\"\n Total time above threshold(s).\n\n Returns\n -------\n total_time_above_threshold: 1D |array_like|\n Total time above threshold for each value in ``thresholds``.\n \"\"\"\n\n return self._total_time_above_threshold\n\n @property\n def number_of_crossings(self):\n \"\"\"\n Total number of upwards crossings for threshold(s).\n\n Returns\n -------\n number_of_crossings: 1D |array_like|\n Total number of upwards crossings for each value in ``thresholds``.\n \"\"\"\n\n return self._number_of_crossings\n\n @property\n def average_times(self):\n \"\"\"\n Average time above threshold(s).\n\n Returns\n -------\n average_times: 1D |array_like|\n Average time above each value in ``thresholds``.\n \"\"\"\n\n return self._average_times\n\n @property\n def rms_times(self):\n \"\"\"\n Root-mean-square values of time above threshold(s).\n\n Returns\n -------\n rms_times: 1D |array_like|\n Root-mean-square values of time above each value in ``thresholds``.\n \"\"\"\n\n return self._rms_times\n", "path": "plasmapy/analysis/time_series/excess_statistics.py"}]}
| 2,544 | 189 |
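For the PlasmaPy record above, the practical point behind the expected outcome is that an astropy `Quantity` keeps its unit through numpy arithmetic and reductions as long as the event durations are held as a `Quantity` array rather than a plain Python list. A small, self-contained illustration (not PlasmaPy internals; requires numpy and astropy):

```python
# Unit-preserving statistics on times above threshold, using a Quantity array.
import astropy.units as u
import numpy as np

time_step = 1 * u.s
event_lengths = np.array([2, 1])                     # event durations in samples

times_above_threshold = event_lengths * time_step    # Quantity array: [2. 1.] s
print(times_above_threshold.sum())                   # 3.0 s  (total time above threshold)
print(np.mean(times_above_threshold))                # 1.5 s  (average time, unit preserved)
print(np.std(times_above_threshold))                 # 0.5 s  (what the class stores as rms_times)
```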
gh_patches_debug_4207
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2164
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow using tuple (and/or iterable) as an alias to the GraphQL type
## Feature Request Type
Alteration (enhancement/optimization) of existing feature(s)
## Description
Returning a `tuple` when the attribute is hinted as `list` [works](https://play.strawberry.rocks/?gist=0815d1a1f0c58a613bd356cbeb45c8a1).
But hinting the return type _correctly as a tuple_ causes an error:
```python
@strawberry.type
class Dictionary:
entries: tuple[Entry]
sources: tuple[Source]
```
```
TypeError: Unexpected type tuple[Entry]
```
Our code uses tuples and iterables whenever appropriate for robustness, efficiency, and documentation. We also use strict type hinting. It'd be great for these read-only sequences to be explicitly supported.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/annotation.py`
Content:
```
1 import sys
2 import typing
3 from collections import abc
4 from enum import Enum
5 from typing import ( # type: ignore[attr-defined]
6 TYPE_CHECKING,
7 Any,
8 Dict,
9 List,
10 Optional,
11 TypeVar,
12 Union,
13 _eval_type,
14 )
15
16 from typing_extensions import Annotated, get_args, get_origin
17
18 from strawberry.private import is_private
19
20
21 try:
22 from typing import ForwardRef
23 except ImportError: # pragma: no cover
24 # ForwardRef is private in python 3.6 and 3.7
25 from typing import _ForwardRef as ForwardRef # type: ignore
26
27 from strawberry.custom_scalar import ScalarDefinition
28 from strawberry.enum import EnumDefinition
29 from strawberry.lazy_type import LazyType, StrawberryLazyReference
30 from strawberry.type import (
31 StrawberryList,
32 StrawberryOptional,
33 StrawberryType,
34 StrawberryTypeVar,
35 )
36 from strawberry.types.types import TypeDefinition
37 from strawberry.unset import UNSET
38 from strawberry.utils.typing import is_generic, is_list, is_type_var, is_union
39
40
41 if TYPE_CHECKING:
42 from strawberry.union import StrawberryUnion
43
44
45 ASYNC_TYPES = (
46 abc.AsyncGenerator,
47 abc.AsyncIterable,
48 abc.AsyncIterator,
49 typing.AsyncContextManager,
50 typing.AsyncGenerator,
51 typing.AsyncIterable,
52 typing.AsyncIterator,
53 )
54
55
56 class StrawberryAnnotation:
57 def __init__(
58 self, annotation: Union[object, str], *, namespace: Optional[Dict] = None
59 ):
60 self.annotation = annotation
61 self.namespace = namespace
62
63 def __eq__(self, other: object) -> bool:
64 if not isinstance(other, StrawberryAnnotation):
65 return NotImplemented
66
67 return self.resolve() == other.resolve()
68
69 @staticmethod
70 def parse_annotated(annotation: object) -> object:
71 from strawberry.auto import StrawberryAuto
72
73 if get_origin(annotation) is Annotated:
74 annotated_args = get_args(annotation)
75 annotation_type = annotated_args[0]
76
77 for arg in annotated_args[1:]:
78 if isinstance(arg, StrawberryLazyReference):
79 assert isinstance(annotation_type, ForwardRef)
80
81 return arg.resolve_forward_ref(annotation_type)
82
83 if isinstance(arg, StrawberryAuto):
84 return arg
85
86 return StrawberryAnnotation.parse_annotated(annotation_type)
87
88 if is_union(annotation):
89 return Union[
90 tuple(
91 StrawberryAnnotation.parse_annotated(arg)
92 for arg in get_args(annotation)
93 ) # pyright: ignore
94 ] # pyright: ignore
95
96 if is_list(annotation):
97 return List[StrawberryAnnotation.parse_annotated(get_args(annotation)[0])] # type: ignore # noqa: E501
98
99 return annotation
100
101 def resolve(self) -> Union[StrawberryType, type]:
102 annotation = self.parse_annotated(self.annotation)
103
104 if isinstance(self.annotation, str):
105 annotation = ForwardRef(self.annotation)
106
107 evaled_type = _eval_type(annotation, self.namespace, None)
108
109 if is_private(evaled_type):
110 return evaled_type
111 if self._is_async_type(evaled_type):
112 evaled_type = self._strip_async_type(evaled_type)
113 if self._is_lazy_type(evaled_type):
114 return evaled_type
115
116 if self._is_generic(evaled_type):
117 if any(is_type_var(type_) for type_ in evaled_type.__args__):
118 return evaled_type
119 return self.create_concrete_type(evaled_type)
120
121 # Simply return objects that are already StrawberryTypes
122 if self._is_strawberry_type(evaled_type):
123 return evaled_type
124
125 # Everything remaining should be a raw annotation that needs to be turned into
126 # a StrawberryType
127 if self._is_enum(evaled_type):
128 return self.create_enum(evaled_type)
129 if self._is_list(evaled_type):
130 return self.create_list(evaled_type)
131 elif self._is_optional(evaled_type):
132 return self.create_optional(evaled_type)
133 elif self._is_union(evaled_type):
134 return self.create_union(evaled_type)
135 elif is_type_var(evaled_type):
136 return self.create_type_var(evaled_type)
137
138 # TODO: Raise exception now, or later?
139 # ... raise NotImplementedError(f"Unknown type {evaled_type}")
140 return evaled_type
141
142 def create_concrete_type(self, evaled_type: type) -> type:
143 if _is_object_type(evaled_type):
144 type_definition: TypeDefinition
145 type_definition = evaled_type._type_definition # type: ignore
146 return type_definition.resolve_generic(evaled_type)
147
148 raise ValueError(f"Not supported {evaled_type}")
149
150 def create_enum(self, evaled_type: Any) -> EnumDefinition:
151 return evaled_type._enum_definition
152
153 def create_list(self, evaled_type: Any) -> StrawberryList:
154 of_type = StrawberryAnnotation(
155 annotation=evaled_type.__args__[0],
156 namespace=self.namespace,
157 ).resolve()
158
159 return StrawberryList(of_type)
160
161 def create_optional(self, evaled_type: Any) -> StrawberryOptional:
162 types = evaled_type.__args__
163 non_optional_types = tuple(
164 filter(
165 lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721
166 types,
167 )
168 )
169
170 # Note that passing a single type to `Union` is equivalent to not using `Union`
171 # at all. This allows us to not di any checks for how many types have been
172 # passed as we can safely use `Union` for both optional types
173 # (e.g. `Optional[str]`) and optional unions (e.g.
174 # `Optional[Union[TypeA, TypeB]]`)
175 child_type = Union[non_optional_types] # type: ignore
176
177 of_type = StrawberryAnnotation(
178 annotation=child_type,
179 namespace=self.namespace,
180 ).resolve()
181
182 return StrawberryOptional(of_type)
183
184 def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:
185 return StrawberryTypeVar(evaled_type)
186
187 def create_union(self, evaled_type) -> "StrawberryUnion":
188 # Prevent import cycles
189 from strawberry.union import StrawberryUnion
190
191 # TODO: Deal with Forward References/origin
192 if isinstance(evaled_type, StrawberryUnion):
193 return evaled_type
194
195 types = evaled_type.__args__
196 union = StrawberryUnion(
197 type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),
198 )
199 return union
200
201 @classmethod
202 def _is_async_type(cls, annotation: type) -> bool:
203 origin = getattr(annotation, "__origin__", None)
204 return origin in ASYNC_TYPES
205
206 @classmethod
207 def _is_enum(cls, annotation: Any) -> bool:
208 # Type aliases are not types so we need to make sure annotation can go into
209 # issubclass
210 if not isinstance(annotation, type):
211 return False
212 return issubclass(annotation, Enum)
213
214 @classmethod
215 def _is_generic(cls, annotation: Any) -> bool:
216 if hasattr(annotation, "__origin__"):
217 return is_generic(annotation.__origin__)
218
219 return False
220
221 @classmethod
222 def _is_lazy_type(cls, annotation: Any) -> bool:
223 return isinstance(annotation, LazyType)
224
225 @classmethod
226 def _is_optional(cls, annotation: Any) -> bool:
227 """Returns True if the annotation is Optional[SomeType]"""
228
229 # Optionals are represented as unions
230 if not cls._is_union(annotation):
231 return False
232
233 types = annotation.__args__
234
235 # A Union to be optional needs to have at least one None type
236 return any(x is type(None) for x in types) # noqa: E721
237
238 @classmethod
239 def _is_list(cls, annotation: Any) -> bool:
240 """Returns True if annotation is a List"""
241
242 annotation_origin = getattr(annotation, "__origin__", None)
243
244 return annotation_origin == list
245
246 @classmethod
247 def _is_strawberry_type(cls, evaled_type: Any) -> bool:
248 # Prevent import cycles
249 from strawberry.union import StrawberryUnion
250
251 if isinstance(evaled_type, EnumDefinition):
252 return True
253 elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject
254 return True
255 # TODO: add support for StrawberryInterface when implemented
256 elif isinstance(evaled_type, StrawberryList):
257 return True
258 elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject
259 return True
260 elif isinstance(evaled_type, TypeDefinition):
261 return True
262 elif isinstance(evaled_type, StrawberryOptional):
263 return True
264 elif isinstance(
265 evaled_type, ScalarDefinition
266 ): # TODO: Replace with StrawberryScalar
267 return True
268 elif isinstance(evaled_type, StrawberryUnion):
269 return True
270
271 return False
272
273 @classmethod
274 def _is_union(cls, annotation: Any) -> bool:
275 """Returns True if annotation is a Union"""
276
277 # this check is needed because unions declared with the new syntax `A | B`
278 # don't have a `__origin__` property on them, but they are instances of
279 # `UnionType`, which is only available in Python 3.10+
280 if sys.version_info >= (3, 10):
281 from types import UnionType
282
283 if isinstance(annotation, UnionType):
284 return True
285
286 # unions declared as Union[A, B] fall through to this check, even on python 3.10+
287
288 annotation_origin = getattr(annotation, "__origin__", None)
289
290 return annotation_origin is typing.Union
291
292 @classmethod
293 def _strip_async_type(cls, annotation) -> type:
294 return annotation.__args__[0]
295
296 @classmethod
297 def _strip_lazy_type(cls, annotation: LazyType) -> type:
298 return annotation.resolve_type()
299
300
301 ################################################################################
302 # Temporary functions to be removed with new types
303 ################################################################################
304
305
306 def _is_input_type(type_: Any) -> bool:
307 if not _is_object_type(type_):
308 return False
309
310 return type_._type_definition.is_input
311
312
313 def _is_object_type(type_: Any) -> bool:
314 # isinstance(type_, StrawberryObjectType) # noqa: E800
315 return hasattr(type_, "_type_definition")
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/strawberry/annotation.py b/strawberry/annotation.py
--- a/strawberry/annotation.py
+++ b/strawberry/annotation.py
@@ -241,7 +241,11 @@
annotation_origin = getattr(annotation, "__origin__", None)
- return annotation_origin == list
+ return (
+ annotation_origin == list
+ or annotation_origin == tuple
+ or annotation_origin is abc.Sequence
+ )
@classmethod
def _is_strawberry_type(cls, evaled_type: Any) -> bool:
|
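The golden diff above widens `_is_list` so that `tuple` and `collections.abc.Sequence` annotations take the same path as `list`. As a sanity check of why comparing the annotation's origin is enough, the sketch below (plain `typing` and `collections.abc` only, assuming Python 3.9+ so the builtin generics from the issue parse; it is not strawberry code) prints the origin of each alias and mirrors the patched predicate:

```python
from collections import abc
from typing import List, Sequence, Tuple, get_origin

# Every generic alias reports the runtime class it parameterizes as its origin.
print(get_origin(List[int]))      # <class 'list'>
print(get_origin(list[int]))      # <class 'list'>  (PEP 585 builtin generic)
print(get_origin(Tuple[int]))     # <class 'tuple'>
print(get_origin(tuple[int]))     # <class 'tuple'>
print(get_origin(Sequence[int]))  # <class 'collections.abc.Sequence'>


def is_list_like(annotation) -> bool:
    """Rough mirror of the patched check: list, tuple and Sequence all qualify."""
    origin = get_origin(annotation)
    return origin is list or origin is tuple or origin is abc.Sequence


assert is_list_like(tuple[int])
assert is_list_like(Sequence[int])
assert is_list_like(List[int])
```

In all three cases `__args__[0]` holds the element type, which is why `create_list` needs no change of its own.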
{"golden_diff": "diff --git a/strawberry/annotation.py b/strawberry/annotation.py\n--- a/strawberry/annotation.py\n+++ b/strawberry/annotation.py\n@@ -241,7 +241,11 @@\n \n annotation_origin = getattr(annotation, \"__origin__\", None)\n \n- return annotation_origin == list\n+ return (\n+ annotation_origin == list\n+ or annotation_origin == tuple\n+ or annotation_origin is abc.Sequence\n+ )\n \n @classmethod\n def _is_strawberry_type(cls, evaled_type: Any) -> bool:\n", "issue": "Allow using tuple (and/or iterable) as an alias to the GraphQL type\n## Feature Request Type\r\n\r\nAlteration (enhancement/optimization) of existing feature(s)\r\n\r\n## Description\r\n\r\nReturning a `tuple` when the attribute is hinted as `list` [works](https://play.strawberry.rocks/?gist=0815d1a1f0c58a613bd356cbeb45c8a1).\r\n\r\nBut hinting the return type _correctly as a tuple_ causes an error:\r\n\r\n```python\r\[email protected]\r\nclass Dictionary:\r\n entries: tuple[Entry]\r\n sources: tuple[Source]\r\n```\r\n\r\n```\r\nTypeError: Unexpected type tuple[Entry]\r\n```\r\n\r\nOur code uses tuples and iterables whenever appropriate for robustness, efficiency, and documentation. We also use strict type hinting. It'd be great for these read-only sequences to be explicitly supported.\n", "before_files": [{"content": "import sys\nimport typing\nfrom collections import abc\nfrom enum import Enum\nfrom typing import ( # type: ignore[attr-defined]\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n TypeVar,\n Union,\n _eval_type,\n)\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.private import is_private\n\n\ntry:\n from typing import ForwardRef\nexcept ImportError: # pragma: no cover\n # ForwardRef is private in python 3.6 and 3.7\n from typing import _ForwardRef as ForwardRef # type: ignore\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.lazy_type import LazyType, StrawberryLazyReference\nfrom strawberry.type import (\n StrawberryList,\n StrawberryOptional,\n StrawberryType,\n StrawberryTypeVar,\n)\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.unset import UNSET\nfrom strawberry.utils.typing import is_generic, is_list, is_type_var, is_union\n\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n\nASYNC_TYPES = (\n abc.AsyncGenerator,\n abc.AsyncIterable,\n abc.AsyncIterator,\n typing.AsyncContextManager,\n typing.AsyncGenerator,\n typing.AsyncIterable,\n typing.AsyncIterator,\n)\n\n\nclass StrawberryAnnotation:\n def __init__(\n self, annotation: Union[object, str], *, namespace: Optional[Dict] = None\n ):\n self.annotation = annotation\n self.namespace = namespace\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, StrawberryAnnotation):\n return NotImplemented\n\n return self.resolve() == other.resolve()\n\n @staticmethod\n def parse_annotated(annotation: object) -> object:\n from strawberry.auto import StrawberryAuto\n\n if get_origin(annotation) is Annotated:\n annotated_args = get_args(annotation)\n annotation_type = annotated_args[0]\n\n for arg in annotated_args[1:]:\n if isinstance(arg, StrawberryLazyReference):\n assert isinstance(annotation_type, ForwardRef)\n\n return arg.resolve_forward_ref(annotation_type)\n\n if isinstance(arg, StrawberryAuto):\n return arg\n\n return StrawberryAnnotation.parse_annotated(annotation_type)\n\n if is_union(annotation):\n return Union[\n tuple(\n StrawberryAnnotation.parse_annotated(arg)\n for arg in 
get_args(annotation)\n ) # pyright: ignore\n ] # pyright: ignore\n\n if is_list(annotation):\n return List[StrawberryAnnotation.parse_annotated(get_args(annotation)[0])] # type: ignore # noqa: E501\n\n return annotation\n\n def resolve(self) -> Union[StrawberryType, type]:\n annotation = self.parse_annotated(self.annotation)\n\n if isinstance(self.annotation, str):\n annotation = ForwardRef(self.annotation)\n\n evaled_type = _eval_type(annotation, self.namespace, None)\n\n if is_private(evaled_type):\n return evaled_type\n if self._is_async_type(evaled_type):\n evaled_type = self._strip_async_type(evaled_type)\n if self._is_lazy_type(evaled_type):\n return evaled_type\n\n if self._is_generic(evaled_type):\n if any(is_type_var(type_) for type_ in evaled_type.__args__):\n return evaled_type\n return self.create_concrete_type(evaled_type)\n\n # Simply return objects that are already StrawberryTypes\n if self._is_strawberry_type(evaled_type):\n return evaled_type\n\n # Everything remaining should be a raw annotation that needs to be turned into\n # a StrawberryType\n if self._is_enum(evaled_type):\n return self.create_enum(evaled_type)\n if self._is_list(evaled_type):\n return self.create_list(evaled_type)\n elif self._is_optional(evaled_type):\n return self.create_optional(evaled_type)\n elif self._is_union(evaled_type):\n return self.create_union(evaled_type)\n elif is_type_var(evaled_type):\n return self.create_type_var(evaled_type)\n\n # TODO: Raise exception now, or later?\n # ... raise NotImplementedError(f\"Unknown type {evaled_type}\")\n return evaled_type\n\n def create_concrete_type(self, evaled_type: type) -> type:\n if _is_object_type(evaled_type):\n type_definition: TypeDefinition\n type_definition = evaled_type._type_definition # type: ignore\n return type_definition.resolve_generic(evaled_type)\n\n raise ValueError(f\"Not supported {evaled_type}\")\n\n def create_enum(self, evaled_type: Any) -> EnumDefinition:\n return evaled_type._enum_definition\n\n def create_list(self, evaled_type: Any) -> StrawberryList:\n of_type = StrawberryAnnotation(\n annotation=evaled_type.__args__[0],\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryList(of_type)\n\n def create_optional(self, evaled_type: Any) -> StrawberryOptional:\n types = evaled_type.__args__\n non_optional_types = tuple(\n filter(\n lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721\n types,\n )\n )\n\n # Note that passing a single type to `Union` is equivalent to not using `Union`\n # at all. This allows us to not di any checks for how many types have been\n # passed as we can safely use `Union` for both optional types\n # (e.g. 
`Optional[str]`) and optional unions (e.g.\n # `Optional[Union[TypeA, TypeB]]`)\n child_type = Union[non_optional_types] # type: ignore\n\n of_type = StrawberryAnnotation(\n annotation=child_type,\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryOptional(of_type)\n\n def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:\n return StrawberryTypeVar(evaled_type)\n\n def create_union(self, evaled_type) -> \"StrawberryUnion\":\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n # TODO: Deal with Forward References/origin\n if isinstance(evaled_type, StrawberryUnion):\n return evaled_type\n\n types = evaled_type.__args__\n union = StrawberryUnion(\n type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),\n )\n return union\n\n @classmethod\n def _is_async_type(cls, annotation: type) -> bool:\n origin = getattr(annotation, \"__origin__\", None)\n return origin in ASYNC_TYPES\n\n @classmethod\n def _is_enum(cls, annotation: Any) -> bool:\n # Type aliases are not types so we need to make sure annotation can go into\n # issubclass\n if not isinstance(annotation, type):\n return False\n return issubclass(annotation, Enum)\n\n @classmethod\n def _is_generic(cls, annotation: Any) -> bool:\n if hasattr(annotation, \"__origin__\"):\n return is_generic(annotation.__origin__)\n\n return False\n\n @classmethod\n def _is_lazy_type(cls, annotation: Any) -> bool:\n return isinstance(annotation, LazyType)\n\n @classmethod\n def _is_optional(cls, annotation: Any) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n if not cls._is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x is type(None) for x in types) # noqa: E721\n\n @classmethod\n def _is_list(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n @classmethod\n def _is_strawberry_type(cls, evaled_type: Any) -> bool:\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n if isinstance(evaled_type, EnumDefinition):\n return True\n elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject\n return True\n # TODO: add support for StrawberryInterface when implemented\n elif isinstance(evaled_type, StrawberryList):\n return True\n elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject\n return True\n elif isinstance(evaled_type, TypeDefinition):\n return True\n elif isinstance(evaled_type, StrawberryOptional):\n return True\n elif isinstance(\n evaled_type, ScalarDefinition\n ): # TODO: Replace with StrawberryScalar\n return True\n elif isinstance(evaled_type, StrawberryUnion):\n return True\n\n return False\n\n @classmethod\n def _is_union(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin is 
typing.Union\n\n @classmethod\n def _strip_async_type(cls, annotation) -> type:\n return annotation.__args__[0]\n\n @classmethod\n def _strip_lazy_type(cls, annotation: LazyType) -> type:\n return annotation.resolve_type()\n\n\n################################################################################\n# Temporary functions to be removed with new types\n################################################################################\n\n\ndef _is_input_type(type_: Any) -> bool:\n if not _is_object_type(type_):\n return False\n\n return type_._type_definition.is_input\n\n\ndef _is_object_type(type_: Any) -> bool:\n # isinstance(type_, StrawberryObjectType) # noqa: E800\n return hasattr(type_, \"_type_definition\")\n", "path": "strawberry/annotation.py"}], "after_files": [{"content": "import sys\nimport typing\nfrom collections import abc\nfrom enum import Enum\nfrom typing import ( # type: ignore[attr-defined]\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n TypeVar,\n Union,\n _eval_type,\n)\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.private import is_private\n\n\ntry:\n from typing import ForwardRef\nexcept ImportError: # pragma: no cover\n # ForwardRef is private in python 3.6 and 3.7\n from typing import _ForwardRef as ForwardRef # type: ignore\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.lazy_type import LazyType, StrawberryLazyReference\nfrom strawberry.type import (\n StrawberryList,\n StrawberryOptional,\n StrawberryType,\n StrawberryTypeVar,\n)\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.unset import UNSET\nfrom strawberry.utils.typing import is_generic, is_list, is_type_var, is_union\n\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n\nASYNC_TYPES = (\n abc.AsyncGenerator,\n abc.AsyncIterable,\n abc.AsyncIterator,\n typing.AsyncContextManager,\n typing.AsyncGenerator,\n typing.AsyncIterable,\n typing.AsyncIterator,\n)\n\n\nclass StrawberryAnnotation:\n def __init__(\n self, annotation: Union[object, str], *, namespace: Optional[Dict] = None\n ):\n self.annotation = annotation\n self.namespace = namespace\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, StrawberryAnnotation):\n return NotImplemented\n\n return self.resolve() == other.resolve()\n\n @staticmethod\n def parse_annotated(annotation: object) -> object:\n from strawberry.auto import StrawberryAuto\n\n if get_origin(annotation) is Annotated:\n annotated_args = get_args(annotation)\n annotation_type = annotated_args[0]\n\n for arg in annotated_args[1:]:\n if isinstance(arg, StrawberryLazyReference):\n assert isinstance(annotation_type, ForwardRef)\n\n return arg.resolve_forward_ref(annotation_type)\n\n if isinstance(arg, StrawberryAuto):\n return arg\n\n return StrawberryAnnotation.parse_annotated(annotation_type)\n\n if is_union(annotation):\n return Union[\n tuple(\n StrawberryAnnotation.parse_annotated(arg)\n for arg in get_args(annotation)\n ) # pyright: ignore\n ] # pyright: ignore\n\n if is_list(annotation):\n return List[StrawberryAnnotation.parse_annotated(get_args(annotation)[0])] # type: ignore # noqa: E501\n\n return annotation\n\n def resolve(self) -> Union[StrawberryType, type]:\n annotation = self.parse_annotated(self.annotation)\n\n if isinstance(self.annotation, str):\n annotation = ForwardRef(self.annotation)\n\n evaled_type = _eval_type(annotation, self.namespace, None)\n\n if is_private(evaled_type):\n return 
evaled_type\n if self._is_async_type(evaled_type):\n evaled_type = self._strip_async_type(evaled_type)\n if self._is_lazy_type(evaled_type):\n return evaled_type\n\n if self._is_generic(evaled_type):\n if any(is_type_var(type_) for type_ in evaled_type.__args__):\n return evaled_type\n return self.create_concrete_type(evaled_type)\n\n # Simply return objects that are already StrawberryTypes\n if self._is_strawberry_type(evaled_type):\n return evaled_type\n\n # Everything remaining should be a raw annotation that needs to be turned into\n # a StrawberryType\n if self._is_enum(evaled_type):\n return self.create_enum(evaled_type)\n if self._is_list(evaled_type):\n return self.create_list(evaled_type)\n elif self._is_optional(evaled_type):\n return self.create_optional(evaled_type)\n elif self._is_union(evaled_type):\n return self.create_union(evaled_type)\n elif is_type_var(evaled_type):\n return self.create_type_var(evaled_type)\n\n # TODO: Raise exception now, or later?\n # ... raise NotImplementedError(f\"Unknown type {evaled_type}\")\n return evaled_type\n\n def create_concrete_type(self, evaled_type: type) -> type:\n if _is_object_type(evaled_type):\n type_definition: TypeDefinition\n type_definition = evaled_type._type_definition # type: ignore\n return type_definition.resolve_generic(evaled_type)\n\n raise ValueError(f\"Not supported {evaled_type}\")\n\n def create_enum(self, evaled_type: Any) -> EnumDefinition:\n return evaled_type._enum_definition\n\n def create_list(self, evaled_type: Any) -> StrawberryList:\n of_type = StrawberryAnnotation(\n annotation=evaled_type.__args__[0],\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryList(of_type)\n\n def create_optional(self, evaled_type: Any) -> StrawberryOptional:\n types = evaled_type.__args__\n non_optional_types = tuple(\n filter(\n lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721\n types,\n )\n )\n\n # Note that passing a single type to `Union` is equivalent to not using `Union`\n # at all. This allows us to not di any checks for how many types have been\n # passed as we can safely use `Union` for both optional types\n # (e.g. 
`Optional[str]`) and optional unions (e.g.\n # `Optional[Union[TypeA, TypeB]]`)\n child_type = Union[non_optional_types] # type: ignore\n\n of_type = StrawberryAnnotation(\n annotation=child_type,\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryOptional(of_type)\n\n def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:\n return StrawberryTypeVar(evaled_type)\n\n def create_union(self, evaled_type) -> \"StrawberryUnion\":\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n # TODO: Deal with Forward References/origin\n if isinstance(evaled_type, StrawberryUnion):\n return evaled_type\n\n types = evaled_type.__args__\n union = StrawberryUnion(\n type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),\n )\n return union\n\n @classmethod\n def _is_async_type(cls, annotation: type) -> bool:\n origin = getattr(annotation, \"__origin__\", None)\n return origin in ASYNC_TYPES\n\n @classmethod\n def _is_enum(cls, annotation: Any) -> bool:\n # Type aliases are not types so we need to make sure annotation can go into\n # issubclass\n if not isinstance(annotation, type):\n return False\n return issubclass(annotation, Enum)\n\n @classmethod\n def _is_generic(cls, annotation: Any) -> bool:\n if hasattr(annotation, \"__origin__\"):\n return is_generic(annotation.__origin__)\n\n return False\n\n @classmethod\n def _is_lazy_type(cls, annotation: Any) -> bool:\n return isinstance(annotation, LazyType)\n\n @classmethod\n def _is_optional(cls, annotation: Any) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n if not cls._is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x is type(None) for x in types) # noqa: E721\n\n @classmethod\n def _is_list(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return (\n annotation_origin == list\n or annotation_origin == tuple\n or annotation_origin is abc.Sequence\n )\n\n @classmethod\n def _is_strawberry_type(cls, evaled_type: Any) -> bool:\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n if isinstance(evaled_type, EnumDefinition):\n return True\n elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject\n return True\n # TODO: add support for StrawberryInterface when implemented\n elif isinstance(evaled_type, StrawberryList):\n return True\n elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject\n return True\n elif isinstance(evaled_type, TypeDefinition):\n return True\n elif isinstance(evaled_type, StrawberryOptional):\n return True\n elif isinstance(\n evaled_type, ScalarDefinition\n ): # TODO: Replace with StrawberryScalar\n return True\n elif isinstance(evaled_type, StrawberryUnion):\n return True\n\n return False\n\n @classmethod\n def _is_union(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = 
getattr(annotation, \"__origin__\", None)\n\n return annotation_origin is typing.Union\n\n @classmethod\n def _strip_async_type(cls, annotation) -> type:\n return annotation.__args__[0]\n\n @classmethod\n def _strip_lazy_type(cls, annotation: LazyType) -> type:\n return annotation.resolve_type()\n\n\n################################################################################\n# Temporary functions to be removed with new types\n################################################################################\n\n\ndef _is_input_type(type_: Any) -> bool:\n if not _is_object_type(type_):\n return False\n\n return type_._type_definition.is_input\n\n\ndef _is_object_type(type_: Any) -> bool:\n # isinstance(type_, StrawberryObjectType) # noqa: E800\n return hasattr(type_, \"_type_definition\")\n", "path": "strawberry/annotation.py"}]}
| 3,542 | 133 |
gh_patches_debug_789
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-372
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bbox filter from read_file doesn't take advantage of fiona filtering
In line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28
The function goes through the trouble of checking if `bbox` is not null, but just calls `f` in `from_features` just the same.
Line 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/file.py`
Content:
```
1 import os
2
3 import fiona
4 import numpy as np
5 from shapely.geometry import mapping
6
7 from six import iteritems
8 from geopandas import GeoDataFrame
9
10
11 def read_file(filename, **kwargs):
12 """
13 Returns a GeoDataFrame from a file.
14
15 *filename* is either the absolute or relative path to the file to be
16 opened and *kwargs* are keyword args to be passed to the `open` method
17 in the fiona library when opening the file. For more information on
18 possible keywords, type: ``import fiona; help(fiona.open)``
19 """
20 bbox = kwargs.pop('bbox', None)
21 with fiona.open(filename, **kwargs) as f:
22 crs = f.crs
23 if bbox is not None:
24 assert len(bbox)==4
25 f_filt = f.filter(bbox=bbox)
26 else:
27 f_filt = f
28 gdf = GeoDataFrame.from_features(f, crs=crs)
29
30 return gdf
31
32
33 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
34 **kwargs):
35 """
36 Write this GeoDataFrame to an OGR data source
37
38 A dictionary of supported OGR providers is available via:
39 >>> import fiona
40 >>> fiona.supported_drivers
41
42 Parameters
43 ----------
44 df : GeoDataFrame to be written
45 filename : string
46 File path or file handle to write to.
47 driver : string, default 'ESRI Shapefile'
48 The OGR format driver used to write the vector file.
49 schema : dict, default None
50 If specified, the schema dictionary is passed to Fiona to
51 better control how the file is written. If None, GeoPandas
52 will determine the schema based on each column's dtype
53
54 The *kwargs* are passed to fiona.open and can be used to write
55 to multi-layer data, store data within archives (zip files), etc.
56 """
57 if schema is None:
58 schema = infer_schema(df)
59 filename = os.path.abspath(os.path.expanduser(filename))
60 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
61 schema=schema, **kwargs) as c:
62 for feature in df.iterfeatures():
63 c.write(feature)
64
65
66 def infer_schema(df):
67 try:
68 from collections import OrderedDict
69 except ImportError:
70 from ordereddict import OrderedDict
71
72 def convert_type(in_type):
73 if in_type == object:
74 return 'str'
75 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
76 if out_type == 'long':
77 out_type = 'int'
78 return out_type
79
80 properties = OrderedDict([
81 (col, convert_type(_type)) for col, _type in
82 zip(df.columns, df.dtypes) if col != df._geometry_column_name
83 ])
84
85 geom_type = _common_geom_type(df)
86 if not geom_type:
87 raise ValueError("Geometry column cannot contain mutiple "
88 "geometry types when writing to file.")
89
90 schema = {'geometry': geom_type, 'properties': properties}
91
92 return schema
93
94
95 def _common_geom_type(df):
96 # Need to check geom_types before we write to file...
97 # Some (most?) providers expect a single geometry type:
98 # Point, LineString, or Polygon
99 geom_types = df.geometry.geom_type.unique()
100
101 from os.path import commonprefix # To find longest common prefix
102 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse
103 if not geom_type:
104 geom_type = None
105
106 return geom_type
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -25,7 +25,7 @@
f_filt = f.filter(bbox=bbox)
else:
f_filt = f
- gdf = GeoDataFrame.from_features(f, crs=crs)
+ gdf = GeoDataFrame.from_features(f_filt, crs=crs)
return gdf
|
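The fix above matters because `f.filter(bbox=...)` is a lazy iterator that yields only the records intersecting the bounding box, while iterating `f` itself walks the whole layer, so passing `f` silently discarded the filtering work. A minimal sketch of the corrected flow follows; the shapefile name and the bounds are illustrative assumptions, not part of the record:

```python
import fiona
from geopandas import GeoDataFrame

bbox = (-10.0, 35.0, 30.0, 60.0)  # (minx, miny, maxx, maxy), illustrative values

with fiona.open("countries.shp") as src:      # hypothetical local shapefile
    crs = src.crs
    features = src.filter(bbox=bbox)          # fiona performs the spatial filter
    gdf = GeoDataFrame.from_features(features, crs=crs)

print(len(gdf))  # only the features that intersect bbox, not the full layer
```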
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -25,7 +25,7 @@\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n- gdf = GeoDataFrame.from_features(f, crs=crs)\n+ gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n \n return gdf\n", "issue": "bbox filter from read_file doesn't take advantage of fiona filtering\nIn line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28\n\nThe function goes through the trouble of checking if `bbox` is not null, but just calls `f` in `from_features` just the same.\n\nLine 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.\n\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) 
providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}], "after_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
| 1,366 | 108 |
gh_patches_debug_17391
|
rasdani/github-patches
|
git_diff
|
google__jax-4999
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error caused by shutil.rmtree
```
Traceback (most recent call last):
File "\\?\C:\Users\cloud\AppData\Local\Temp\Bazel.runfiles_vfpgffuf\runfiles\__main__\build\install_xla_in_source_tree.py", line 83, in <module>
shutil.rmtree(jaxlib_dir)
File "C:\Users\cloud\miniconda3\lib\shutil.py", line 516, in rmtree
return _rmtree_unsafe(path, onerror)
File "C:\Users\cloud\miniconda3\lib\shutil.py", line 400, in _rmtree_unsafe
onerror(os.unlink, fullname, sys.exc_info())
File "C:\Users\cloud\miniconda3\lib\shutil.py", line 398, in _rmtree_unsafe
os.unlink(fullname)
WindowsError: [Error 5] Access is denied.: 'D:\\jax\\build\\jaxlib\\cublas_kernels.pyd'
```
This only happens on rebuild.
The reason is that `shutil.rmtree` will not delete read-only files on Windows.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build/build_wheel.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Script that builds a jaxlib wheel, intended to be run via bazel run as part
16 # of the jaxlib build process.
17
18 # Most users should not run this script directly; use build.py instead.
19
20 import argparse
21 import functools
22 import glob
23 import os
24 import platform
25 import shutil
26 import subprocess
27 import sys
28 import tempfile
29
30 from bazel_tools.tools.python.runfiles import runfiles
31
32 parser = argparse.ArgumentParser()
33 parser.add_argument(
34 "--sources_path",
35 default=None,
36 help="Path in which the wheel's sources should be prepared. Optional. If "
37 "omitted, a temporary directory will be used.")
38 parser.add_argument(
39 "--output_path",
40 default=None,
41 required=True,
42 help="Path to which the output wheel should be written. Required.")
43 args = parser.parse_args()
44
45 r = runfiles.Create()
46
47
48 def _is_windows():
49 return sys.platform.startswith("win32")
50
51
52 def _copy_so(src_file, dst_dir, dst_filename=None):
53 src_filename = os.path.basename(src_file)
54 if not dst_filename:
55 if _is_windows() and src_filename.endswith(".so"):
56 dst_filename = src_filename[:-3] + ".pyd"
57 else:
58 dst_filename = src_filename
59 dst_file = os.path.join(dst_dir, dst_filename)
60 shutil.copy(src_file, dst_file)
61
62
63 def _copy_normal(src_file, dst_dir, dst_filename=None):
64 src_filename = os.path.basename(src_file)
65 dst_file = os.path.join(dst_dir, dst_filename or src_filename)
66 shutil.copy(src_file, dst_file)
67
68
69 def copy_file(src_file, dst_dir, dst_filename=None):
70 if src_file.endswith(".so"):
71 _copy_so(src_file, dst_dir, dst_filename=dst_filename)
72 else:
73 _copy_normal(src_file, dst_dir, dst_filename=dst_filename)
74
75 def patch_copy_xla_client_py(dst_dir):
76 with open(r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/xla_client.py")) as f:
77 src = f.read()
78 src = src.replace("from tensorflow.compiler.xla.python import xla_extension as _xla",
79 "from . import xla_extension as _xla")
80 with open(os.path.join(dst_dir, "xla_client.py"), "w") as f:
81 f.write(src)
82
83
84 def patch_copy_tpu_client_py(dst_dir):
85 with open(r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py")) as f:
86 src = f.read()
87 src = src.replace("from tensorflow.compiler.xla.python import xla_extension as _xla",
88 "from . import xla_extension as _xla")
89 src = src.replace("from tensorflow.compiler.xla.python import xla_client",
90 "from . import xla_client")
91 src = src.replace(
92 "from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client",
93 "from . import tpu_client_extension as _tpu_client")
94 with open(os.path.join(dst_dir, "tpu_client.py"), "w") as f:
95 f.write(src)
96
97
98 def prepare_wheel(sources_path):
99 """Assembles a source tree for the wheel in `sources_path`."""
100 jaxlib_dir = os.path.join(sources_path, "jaxlib")
101 os.makedirs(jaxlib_dir)
102 copy_to_jaxlib = functools.partial(copy_file, dst_dir=jaxlib_dir)
103
104 copy_file(r.Rlocation("__main__/jaxlib/setup.py"), dst_dir=sources_path)
105 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/init.py"), dst_filename="__init__.py")
106 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/lapack.so"))
107 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/_pocketfft.so"))
108 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/pocketfft_flatbuffers_py_generated.py"))
109 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/pocketfft.py"))
110 if r.Rlocation("__main__/jaxlib/cusolver_kernels.so") is not None:
111 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cusolver_kernels.so"))
112 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cublas_kernels.so"))
113 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cusolver_kernels.so"))
114 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cuda_prng_kernels.so"))
115 if r.Rlocation("__main__/jaxlib/cusolver_kernels.pyd") is not None:
116 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cusolver_kernels.pyd"))
117 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cublas_kernels.pyd"))
118 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cusolver_kernels.pyd"))
119 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cuda_prng_kernels.pyd"))
120 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/version.py"))
121 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cusolver.py"))
122 copy_to_jaxlib(r.Rlocation("__main__/jaxlib/cuda_prng.py"))
123
124 if _is_windows():
125 copy_to_jaxlib(r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/xla_extension.pyd"))
126 else:
127 copy_to_jaxlib(r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/xla_extension.so"))
128 patch_copy_xla_client_py(jaxlib_dir)
129
130 if not _is_windows():
131 copy_to_jaxlib(r.Rlocation("org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client_extension.so"))
132 patch_copy_tpu_client_py(jaxlib_dir)
133
134
135 def build_wheel(sources_path, output_path):
136 """Builds a wheel in `output_path` using the source tree in `sources_path`."""
137 platform_name = {
138 "Linux": "manylinux2010",
139 "Darwin": "macosx_10_9",
140 "Windows": "win",
141 }[platform.system()]
142 cpu_name = "amd64" if platform.system() == "Windows" else "x86_64"
143 python_tag_arg = (f"--python-tag=cp{sys.version_info.major}"
144 f"{sys.version_info.minor}")
145 platform_tag_arg = f"--plat-name={platform_name}_{cpu_name}"
146 cwd = os.getcwd()
147 os.chdir(sources_path)
148 subprocess.run([sys.executable, "setup.py", "bdist_wheel",
149 python_tag_arg, platform_tag_arg])
150 os.chdir(cwd)
151 for wheel in glob.glob(os.path.join(sources_path, "dist", "*.whl")):
152 output_file = os.path.join(output_path, os.path.basename(wheel))
153 sys.stderr.write(f"Output wheel: {output_file}\n\n")
154 sys.stderr.write(f"To install the newly-built jaxlib wheel, run:\n")
155 sys.stderr.write(f" pip install {output_file}\n\n")
156 shutil.copy(wheel, output_path)
157
158
159 tmpdir = None
160 sources_path = args.sources_path
161 if sources_path is None:
162 tmpdir = tempfile.TemporaryDirectory(prefix="jaxlib")
163 sources_path = tmpdir.name
164
165 try:
166 os.makedirs(args.output_path, exist_ok=True)
167 prepare_wheel(sources_path)
168 build_wheel(sources_path, args.output_path)
169 finally:
170 if tmpdir:
171 tmpdir.cleanup()
172
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/build/build_wheel.py b/build/build_wheel.py
--- a/build/build_wheel.py
+++ b/build/build_wheel.py
@@ -57,13 +57,19 @@
else:
dst_filename = src_filename
dst_file = os.path.join(dst_dir, dst_filename)
- shutil.copy(src_file, dst_file)
+ if _is_windows():
+ shutil.copyfile(src_file, dst_file)
+ else:
+ shutil.copy(src_file, dst_file)
def _copy_normal(src_file, dst_dir, dst_filename=None):
src_filename = os.path.basename(src_file)
dst_file = os.path.join(dst_dir, dst_filename or src_filename)
- shutil.copy(src_file, dst_file)
+ if _is_windows():
+ shutil.copyfile(src_file, dst_file)
+ else:
+ shutil.copy(src_file, dst_file)
def copy_file(src_file, dst_dir, dst_filename=None):
@@ -169,4 +175,3 @@
finally:
if tmpdir:
tmpdir.cleanup()
-
|
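The patch above avoids the permission problem at copy time rather than at delete time: `shutil.copy` also copies the source's permission bits, so read-only Bazel outputs produce read-only `.pyd` files that a later `shutil.rmtree` cannot remove on Windows, whereas `shutil.copyfile` copies only the file contents and leaves the destination writable. The `rmtree` failure quoted in the issue can also be worked around directly with an `onerror` hook; this is a general-purpose sketch, not part of the patch:

```python
import os
import shutil
import stat


def _force_remove(func, path, exc_info):
    """rmtree onerror hook: clear the read-only bit, then retry the failed call."""
    os.chmod(path, stat.S_IWRITE)
    func(path)


# Example: remove a build tree that may contain read-only files on Windows.
shutil.rmtree("build/jaxlib", onerror=_force_remove)  # path is illustrative
```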
{"golden_diff": "diff --git a/build/build_wheel.py b/build/build_wheel.py\n--- a/build/build_wheel.py\n+++ b/build/build_wheel.py\n@@ -57,13 +57,19 @@\n else:\n dst_filename = src_filename\n dst_file = os.path.join(dst_dir, dst_filename)\n- shutil.copy(src_file, dst_file)\n+ if _is_windows():\n+ shutil.copyfile(src_file, dst_file)\n+ else:\n+ shutil.copy(src_file, dst_file)\n \n \n def _copy_normal(src_file, dst_dir, dst_filename=None):\n src_filename = os.path.basename(src_file)\n dst_file = os.path.join(dst_dir, dst_filename or src_filename)\n- shutil.copy(src_file, dst_file)\n+ if _is_windows():\n+ shutil.copyfile(src_file, dst_file)\n+ else:\n+ shutil.copy(src_file, dst_file)\n \n \n def copy_file(src_file, dst_dir, dst_filename=None):\n@@ -169,4 +175,3 @@\n finally:\n if tmpdir:\n tmpdir.cleanup()\n-\n", "issue": "Error caused by shutil.rmtree \n```\r\nTraceback (most recent call last):\r\n File \"\\\\?\\C:\\Users\\cloud\\AppData\\Local\\Temp\\Bazel.runfiles_vfpgffuf\\runfiles\\__main__\\build\\install_xla_in_source_tree.py\", line 83, in <module>\r\n shutil.rmtree(jaxlib_dir)\r\n File \"C:\\Users\\cloud\\miniconda3\\lib\\shutil.py\", line 516, in rmtree\r\n return _rmtree_unsafe(path, onerror)\r\n File \"C:\\Users\\cloud\\miniconda3\\lib\\shutil.py\", line 400, in _rmtree_unsafe\r\n onerror(os.unlink, fullname, sys.exc_info())\r\n File \"C:\\Users\\cloud\\miniconda3\\lib\\shutil.py\", line 398, in _rmtree_unsafe\r\n os.unlink(fullname)\r\nWindowsError: [Error 5] Access is denied.: 'D:\\\\jax\\\\build\\\\jaxlib\\\\cublas_kernels.pyd'\r\n```\r\n\r\nThis only happens on rebuild.\r\n\r\nThe reason is `shutil.rmtree` will not delete readonly file on Windows.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Script that builds a jaxlib wheel, intended to be run via bazel run as part\n# of the jaxlib build process.\n\n# Most users should not run this script directly; use build.py instead.\n\nimport argparse\nimport functools\nimport glob\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom bazel_tools.tools.python.runfiles import runfiles\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--sources_path\",\n default=None,\n help=\"Path in which the wheel's sources should be prepared. Optional. If \"\n \"omitted, a temporary directory will be used.\")\nparser.add_argument(\n \"--output_path\",\n default=None,\n required=True,\n help=\"Path to which the output wheel should be written. 
Required.\")\nargs = parser.parse_args()\n\nr = runfiles.Create()\n\n\ndef _is_windows():\n return sys.platform.startswith(\"win32\")\n\n\ndef _copy_so(src_file, dst_dir, dst_filename=None):\n src_filename = os.path.basename(src_file)\n if not dst_filename:\n if _is_windows() and src_filename.endswith(\".so\"):\n dst_filename = src_filename[:-3] + \".pyd\"\n else:\n dst_filename = src_filename\n dst_file = os.path.join(dst_dir, dst_filename)\n shutil.copy(src_file, dst_file)\n\n\ndef _copy_normal(src_file, dst_dir, dst_filename=None):\n src_filename = os.path.basename(src_file)\n dst_file = os.path.join(dst_dir, dst_filename or src_filename)\n shutil.copy(src_file, dst_file)\n\n\ndef copy_file(src_file, dst_dir, dst_filename=None):\n if src_file.endswith(\".so\"):\n _copy_so(src_file, dst_dir, dst_filename=dst_filename)\n else:\n _copy_normal(src_file, dst_dir, dst_filename=dst_filename)\n\ndef patch_copy_xla_client_py(dst_dir):\n with open(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_client.py\")) as f:\n src = f.read()\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_extension as _xla\",\n \"from . import xla_extension as _xla\")\n with open(os.path.join(dst_dir, \"xla_client.py\"), \"w\") as f:\n f.write(src)\n\n\ndef patch_copy_tpu_client_py(dst_dir):\n with open(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py\")) as f:\n src = f.read()\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_extension as _xla\",\n \"from . import xla_extension as _xla\")\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_client\",\n \"from . import xla_client\")\n src = src.replace(\n \"from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client\",\n \"from . 
import tpu_client_extension as _tpu_client\")\n with open(os.path.join(dst_dir, \"tpu_client.py\"), \"w\") as f:\n f.write(src)\n\n\ndef prepare_wheel(sources_path):\n \"\"\"Assembles a source tree for the wheel in `sources_path`.\"\"\"\n jaxlib_dir = os.path.join(sources_path, \"jaxlib\")\n os.makedirs(jaxlib_dir)\n copy_to_jaxlib = functools.partial(copy_file, dst_dir=jaxlib_dir)\n\n copy_file(r.Rlocation(\"__main__/jaxlib/setup.py\"), dst_dir=sources_path)\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/init.py\"), dst_filename=\"__init__.py\")\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/lapack.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/_pocketfft.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/pocketfft_flatbuffers_py_generated.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/pocketfft.py\"))\n if r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\") is not None:\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cublas_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng_kernels.so\"))\n if r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\") is not None:\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cublas_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/version.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng.py\"))\n\n if _is_windows():\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_extension.pyd\"))\n else:\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_extension.so\"))\n patch_copy_xla_client_py(jaxlib_dir)\n\n if not _is_windows():\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client_extension.so\"))\n patch_copy_tpu_client_py(jaxlib_dir)\n\n\ndef build_wheel(sources_path, output_path):\n \"\"\"Builds a wheel in `output_path` using the source tree in `sources_path`.\"\"\"\n platform_name = {\n \"Linux\": \"manylinux2010\",\n \"Darwin\": \"macosx_10_9\",\n \"Windows\": \"win\",\n }[platform.system()]\n cpu_name = \"amd64\" if platform.system() == \"Windows\" else \"x86_64\"\n python_tag_arg = (f\"--python-tag=cp{sys.version_info.major}\"\n f\"{sys.version_info.minor}\")\n platform_tag_arg = f\"--plat-name={platform_name}_{cpu_name}\"\n cwd = os.getcwd()\n os.chdir(sources_path)\n subprocess.run([sys.executable, \"setup.py\", \"bdist_wheel\",\n python_tag_arg, platform_tag_arg])\n os.chdir(cwd)\n for wheel in glob.glob(os.path.join(sources_path, \"dist\", \"*.whl\")):\n output_file = os.path.join(output_path, os.path.basename(wheel))\n sys.stderr.write(f\"Output wheel: {output_file}\\n\\n\")\n sys.stderr.write(f\"To install the newly-built jaxlib wheel, run:\\n\")\n sys.stderr.write(f\" pip install {output_file}\\n\\n\")\n shutil.copy(wheel, output_path)\n\n\ntmpdir = None\nsources_path = args.sources_path\nif sources_path is None:\n tmpdir = tempfile.TemporaryDirectory(prefix=\"jaxlib\")\n sources_path = tmpdir.name\n\ntry:\n os.makedirs(args.output_path, exist_ok=True)\n prepare_wheel(sources_path)\n build_wheel(sources_path, args.output_path)\nfinally:\n if tmpdir:\n 
tmpdir.cleanup()\n\n", "path": "build/build_wheel.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Script that builds a jaxlib wheel, intended to be run via bazel run as part\n# of the jaxlib build process.\n\n# Most users should not run this script directly; use build.py instead.\n\nimport argparse\nimport functools\nimport glob\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom bazel_tools.tools.python.runfiles import runfiles\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--sources_path\",\n default=None,\n help=\"Path in which the wheel's sources should be prepared. Optional. If \"\n \"omitted, a temporary directory will be used.\")\nparser.add_argument(\n \"--output_path\",\n default=None,\n required=True,\n help=\"Path to which the output wheel should be written. Required.\")\nargs = parser.parse_args()\n\nr = runfiles.Create()\n\n\ndef _is_windows():\n return sys.platform.startswith(\"win32\")\n\n\ndef _copy_so(src_file, dst_dir, dst_filename=None):\n src_filename = os.path.basename(src_file)\n if not dst_filename:\n if _is_windows() and src_filename.endswith(\".so\"):\n dst_filename = src_filename[:-3] + \".pyd\"\n else:\n dst_filename = src_filename\n dst_file = os.path.join(dst_dir, dst_filename)\n if _is_windows():\n shutil.copyfile(src_file, dst_file)\n else:\n shutil.copy(src_file, dst_file)\n\n\ndef _copy_normal(src_file, dst_dir, dst_filename=None):\n src_filename = os.path.basename(src_file)\n dst_file = os.path.join(dst_dir, dst_filename or src_filename)\n if _is_windows():\n shutil.copyfile(src_file, dst_file)\n else:\n shutil.copy(src_file, dst_file)\n\n\ndef copy_file(src_file, dst_dir, dst_filename=None):\n if src_file.endswith(\".so\"):\n _copy_so(src_file, dst_dir, dst_filename=dst_filename)\n else:\n _copy_normal(src_file, dst_dir, dst_filename=dst_filename)\n\ndef patch_copy_xla_client_py(dst_dir):\n with open(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_client.py\")) as f:\n src = f.read()\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_extension as _xla\",\n \"from . import xla_extension as _xla\")\n with open(os.path.join(dst_dir, \"xla_client.py\"), \"w\") as f:\n f.write(src)\n\n\ndef patch_copy_tpu_client_py(dst_dir):\n with open(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py\")) as f:\n src = f.read()\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_extension as _xla\",\n \"from . import xla_extension as _xla\")\n src = src.replace(\"from tensorflow.compiler.xla.python import xla_client\",\n \"from . import xla_client\")\n src = src.replace(\n \"from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client\",\n \"from . 
import tpu_client_extension as _tpu_client\")\n with open(os.path.join(dst_dir, \"tpu_client.py\"), \"w\") as f:\n f.write(src)\n\n\ndef prepare_wheel(sources_path):\n \"\"\"Assembles a source tree for the wheel in `sources_path`.\"\"\"\n jaxlib_dir = os.path.join(sources_path, \"jaxlib\")\n os.makedirs(jaxlib_dir)\n copy_to_jaxlib = functools.partial(copy_file, dst_dir=jaxlib_dir)\n\n copy_file(r.Rlocation(\"__main__/jaxlib/setup.py\"), dst_dir=sources_path)\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/init.py\"), dst_filename=\"__init__.py\")\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/lapack.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/_pocketfft.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/pocketfft_flatbuffers_py_generated.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/pocketfft.py\"))\n if r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\") is not None:\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cublas_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.so\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng_kernels.so\"))\n if r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\") is not None:\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cublas_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng_kernels.pyd\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/version.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cusolver.py\"))\n copy_to_jaxlib(r.Rlocation(\"__main__/jaxlib/cuda_prng.py\"))\n\n if _is_windows():\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_extension.pyd\"))\n else:\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_extension.so\"))\n patch_copy_xla_client_py(jaxlib_dir)\n\n if not _is_windows():\n copy_to_jaxlib(r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client_extension.so\"))\n patch_copy_tpu_client_py(jaxlib_dir)\n\n\ndef build_wheel(sources_path, output_path):\n \"\"\"Builds a wheel in `output_path` using the source tree in `sources_path`.\"\"\"\n platform_name = {\n \"Linux\": \"manylinux2010\",\n \"Darwin\": \"macosx_10_9\",\n \"Windows\": \"win\",\n }[platform.system()]\n cpu_name = \"amd64\" if platform.system() == \"Windows\" else \"x86_64\"\n python_tag_arg = (f\"--python-tag=cp{sys.version_info.major}\"\n f\"{sys.version_info.minor}\")\n platform_tag_arg = f\"--plat-name={platform_name}_{cpu_name}\"\n cwd = os.getcwd()\n os.chdir(sources_path)\n subprocess.run([sys.executable, \"setup.py\", \"bdist_wheel\",\n python_tag_arg, platform_tag_arg])\n os.chdir(cwd)\n for wheel in glob.glob(os.path.join(sources_path, \"dist\", \"*.whl\")):\n output_file = os.path.join(output_path, os.path.basename(wheel))\n sys.stderr.write(f\"Output wheel: {output_file}\\n\\n\")\n sys.stderr.write(f\"To install the newly-built jaxlib wheel, run:\\n\")\n sys.stderr.write(f\" pip install {output_file}\\n\\n\")\n shutil.copy(wheel, output_path)\n\n\ntmpdir = None\nsources_path = args.sources_path\nif sources_path is None:\n tmpdir = tempfile.TemporaryDirectory(prefix=\"jaxlib\")\n sources_path = tmpdir.name\n\ntry:\n os.makedirs(args.output_path, exist_ok=True)\n prepare_wheel(sources_path)\n build_wheel(sources_path, args.output_path)\nfinally:\n if tmpdir:\n 
tmpdir.cleanup()\n", "path": "build/build_wheel.py"}]}
| 2,702 | 231 |
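The jax record above turns on a Windows quirk: `shutil.copy` copies permission bits as well as contents, so artifacts copied from read-only Bazel runfiles stay read-only and a later `shutil.rmtree` fails with Access Denied. The sketch below is a minimal, hedged illustration of that behaviour and of one common workaround; `copy_without_readonly_bit`, `_force_remove`, and `remove_tree` are illustrative names, not functions from the jax build scripts.

```
import os
import shutil
import stat


def copy_without_readonly_bit(src, dst):
    # shutil.copy also copies the permission bits, so a read-only source
    # (for example a Bazel runfile) yields a read-only destination on Windows;
    # shutil.copyfile copies only the file contents.
    shutil.copyfile(src, dst)


def _force_remove(func, path, exc_info):
    # onerror handler for shutil.rmtree: clear the read-only attribute,
    # then retry the operation that failed (os.unlink or os.rmdir).
    os.chmod(path, stat.S_IWRITE)
    func(path)


def remove_tree(path):
    # Delete a directory tree even when some files are marked read-only.
    shutil.rmtree(path, onerror=_force_remove)
```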
gh_patches_debug_21853 | rasdani/github-patches | git_diff | aws__aws-sam-cli-935 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sam package of template with SAR metadata fails when using sam build
<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed).
If you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. -->
### Description
`sam package` fails, when trying to package artifacts built by `sam build`, if the template contains SAR metadata and references local files for `LicenseUrl` or `ReadmeUrl` which should get uploaded by `sam package`. Without using `sam build` that works properly, as the files are present in the template directory.
### Steps to reproduce
```
/tmp $ sam init
2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/Grammar.txt
2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/PatternGrammar.txt
[+] Initializing project structure...
[SUCCESS] - Read sam-app/README.md for further instructions on how to proceed
[*] Project initialization is now complete
/tmp $ cd sam-app/
```
* Insert minimal SAR-meta data into the template:
```
Metadata:
AWS::ServerlessRepo::Application:
Name: hello-world
Description: hello world
Author: John
SpdxLicenseId: MIT
LicenseUrl: ./LICENSE
SemanticVersion: 0.0.1
```
```
/tmp/sam-app $ echo "dummy license text" > LICENSE
/tmp/sam-app $ sam build --use-container
2019-01-14 13:45:23 Starting Build inside a container
2019-01-14 13:45:23 Found credentials in shared credentials file: ~/.aws/credentials
2019-01-14 13:45:23 Building resource 'HelloWorldFunction'
Fetching lambci/lambda:build-nodejs8.10 Docker container image......
2019-01-14 13:45:32 Mounting /tmp/sam-app/hello-world as /tmp/samcli/source:ro inside runtime container
Build Succeeded
Built Artifacts : .aws-sam/build
Built Template : .aws-sam/build/template.yaml
Commands you can use next
=========================
[*] Invoke Function: sam local invoke
[*] Package: sam package --s3-bucket <yourbucket>
'nodejs' runtime has not been validated!
Running NodejsNpmBuilder:NpmPack
Running NodejsNpmBuilder:CopySource
Running NodejsNpmBuilder:NpmInstall
/tmp/sam-app $ sam package --s3-bucket dummy
Unable to upload artifact ./LICENSE referenced by LicenseUrl parameter of AWS::ServerlessRepo::Application resource.
Parameter LicenseUrl of resource AWS::ServerlessRepo::Application refers to a file or folder that does not exist /tmp/sam-app/.aws-sam/build/LICENSE
```
### Observed result
`sam package` fails, because the `LICENSE` file isn't present in the build directory.
### Expected result
`sam package` succeeds.
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: Debian/unstable
2. `sam --version`: `SAM CLI, version 0.10.0`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samcli/commands/_utils/template.py`
Content:
```
1 """
2 Utilities to manipulate template
3 """
4
5 import os
6 import six
7 import yaml
8
9 try:
10 import pathlib
11 except ImportError:
12 import pathlib2 as pathlib
13
14 from samcli.yamlhelper import yaml_parse, yaml_dump
15
16
17 _RESOURCES_WITH_LOCAL_PATHS = {
18 "AWS::Serverless::Function": ["CodeUri"],
19 "AWS::Serverless::Api": ["DefinitionUri"],
20 "AWS::AppSync::GraphQLSchema": ["DefinitionS3Location"],
21 "AWS::AppSync::Resolver": ["RequestMappingTemplateS3Location", "ResponseMappingTemplateS3Location"],
22 "AWS::Lambda::Function": ["Code"],
23 "AWS::ApiGateway::RestApi": ["BodyS3Location"],
24 "AWS::ElasticBeanstalk::ApplicationVersion": ["SourceBundle"],
25 "AWS::CloudFormation::Stack": ["TemplateURL"],
26 "AWS::Serverless::Application": ["Location"],
27 "AWS::Lambda::LayerVersion": ["Content"],
28 "AWS::Serverless::LayerVersion": ["ContentUri"]
29 }
30
31
32 def get_template_data(template_file):
33 """
34 Read the template file, parse it as JSON/YAML and return the template as a dictionary.
35
36 Parameters
37 ----------
38 template_file : string
39 Path to the template to read
40
41 Returns
42 -------
43 Template data as a dictionary
44 """
45
46 if not pathlib.Path(template_file).exists():
47 raise ValueError("Template file not found at {}".format(template_file))
48
49 with open(template_file, 'r') as fp:
50 try:
51 return yaml_parse(fp.read())
52 except (ValueError, yaml.YAMLError) as ex:
53 raise ValueError("Failed to parse template: {}".format(str(ex)))
54
55
56 def move_template(src_template_path,
57 dest_template_path,
58 template_dict):
59 """
60 Move the SAM/CloudFormation template from ``src_template_path`` to ``dest_template_path``. For convenience, this
61 method accepts a dictionary of template data ``template_dict`` that will be written to the destination instead of
62 reading from the source file.
63
64 SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.
65 This path is always relative to the template's location. Before writing the template to ``dest_template_path`,
66 we will update these paths to be relative to the new location.
67
68 This methods updates resource properties supported by ``aws cloudformation package`` command:
69 https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html
70
71 You must use this method if you are reading a template from one location, modifying it, and writing it back to a
72 different location.
73
74 Parameters
75 ----------
76 src_template_path : str
77 Path to the original location of the template
78
79 dest_template_path : str
80 Path to the destination location where updated template should be written to
81
82 template_dict : dict
83 Dictionary containing template contents. This dictionary will be updated & written to ``dest`` location.
84 """
85
86 original_root = os.path.dirname(src_template_path)
87 new_root = os.path.dirname(dest_template_path)
88
89 # Next up, we will be writing the template to a different location. Before doing so, we should
90 # update any relative paths in the template to be relative to the new location.
91 modified_template = _update_relative_paths(template_dict,
92 original_root,
93 new_root)
94
95 with open(dest_template_path, "w") as fp:
96 fp.write(yaml_dump(modified_template))
97
98
99 def _update_relative_paths(template_dict,
100 original_root,
101 new_root):
102 """
103 SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.
104 This path is usually relative to the template's location. If the template is being moved from original location
105 ``original_root`` to new location ``new_root``, use this method to update these paths to be
106 relative to ``new_root``.
107
108 After this method is complete, it is safe to write the template to ``new_root`` without
109 breaking any relative paths.
110
111 This methods updates resource properties supported by ``aws cloudformation package`` command:
112 https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html
113
114 If a property is either an absolute path or a S3 URI, this method will not update them.
115
116
117 Parameters
118 ----------
119 template_dict : dict
120 Dictionary containing template contents. This dictionary will be updated & written to ``dest`` location.
121
122 original_root : str
123 Path to the directory where all paths were originally set relative to. This is usually the directory
124 containing the template originally
125
126 new_root : str
127 Path to the new directory that all paths set relative to after this method completes.
128
129 Returns
130 -------
131 Updated dictionary
132
133 """
134
135 for _, resource in template_dict.get("Resources", {}).items():
136 resource_type = resource.get("Type")
137
138 if resource_type not in _RESOURCES_WITH_LOCAL_PATHS:
139 # Unknown resource. Skipping
140 continue
141
142 for path_prop_name in _RESOURCES_WITH_LOCAL_PATHS[resource_type]:
143 properties = resource.get("Properties", {})
144 path = properties.get(path_prop_name)
145
146 updated_path = _resolve_relative_to(path, original_root, new_root)
147 if not updated_path:
148 # This path does not need to get updated
149 continue
150
151 properties[path_prop_name] = updated_path
152
153 # AWS::Includes can be anywhere within the template dictionary. Hence we need to recurse through the
154 # dictionary in a separate method to find and update relative paths in there
155 template_dict = _update_aws_include_relative_path(template_dict, original_root, new_root)
156
157 return template_dict
158
159
160 def _update_aws_include_relative_path(template_dict, original_root, new_root):
161 """
162 Update relative paths in "AWS::Include" directive. This directive can be present at any part of the template,
163 and not just within resources.
164 """
165
166 for key, val in template_dict.items():
167 if key == "Fn::Transform":
168 if isinstance(val, dict) and val.get("Name") == "AWS::Include":
169 path = val.get("Parameters", {}).get("Location", {})
170 updated_path = _resolve_relative_to(path, original_root, new_root)
171 if not updated_path:
172 # This path does not need to get updated
173 continue
174
175 val["Parameters"]["Location"] = updated_path
176
177 # Recurse through all dictionary values
178 elif isinstance(val, dict):
179 _update_aws_include_relative_path(val, original_root, new_root)
180 elif isinstance(val, list):
181 for item in val:
182 if isinstance(item, dict):
183 _update_aws_include_relative_path(item, original_root, new_root)
184
185 return template_dict
186
187
188 def _resolve_relative_to(path, original_root, new_root):
189 """
190 If the given ``path`` is a relative path, then assume it is relative to ``original_root``. This method will
191 update the path to be resolve it relative to ``new_root`` and return.
192
193 Examples
194 -------
195 # Assume a file called template.txt at location /tmp/original/root/template.txt expressed as relative path
196 # We are trying to update it to be relative to /tmp/new/root instead of the /tmp/original/root
197 >>> result = _resolve_relative_to("template.txt", \
198 "/tmp/original/root", \
199 "/tmp/new/root")
200 >>> result
201 ../../original/root/template.txt
202
203 Returns
204 -------
205 Updated path if the given path is a relative path. None, if the path is not a relative path.
206 """
207
208 if not isinstance(path, six.string_types) \
209 or path.startswith("s3://") \
210 or os.path.isabs(path):
211 # Value is definitely NOT a relative path. It is either a S3 URi or Absolute path or not a string at all
212 return None
213
214 # Value is definitely a relative path. Change it relative to the destination directory
215 return os.path.relpath(
216 os.path.normpath(os.path.join(original_root, path)), # Absolute original path w.r.t ``original_root``
217 new_root) # Resolve the original path with respect to ``new_root``
218
```
--- END FILES ---
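As a hedged illustration of the path-rebasing rule that `_resolve_relative_to` documents above (and that, per the issue, is applied to resource properties but not to the template's `Metadata` section), here is a standalone sketch; `rebase_path` is an illustrative name rather than SAM CLI code, and the printed result assumes a POSIX path separator.

```
import os


def rebase_path(path, original_root, new_root):
    # Resolve the value against the directory it was originally relative to,
    # then re-express it relative to the directory the template is moving to.
    absolute = os.path.normpath(os.path.join(original_root, path))
    return os.path.relpath(absolute, new_root)


# "./LICENSE" next to the source template, seen from the build directory:
print(rebase_path("./LICENSE", "/tmp/sam-app", "/tmp/sam-app/.aws-sam/build"))
# prints ../../LICENSE on a POSIX filesystem
```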
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/samcli/commands/_utils/template.py b/samcli/commands/_utils/template.py
--- a/samcli/commands/_utils/template.py
+++ b/samcli/commands/_utils/template.py
@@ -14,6 +14,10 @@
from samcli.yamlhelper import yaml_parse, yaml_dump
+_METADATA_WITH_LOCAL_PATHS = {
+ "AWS::ServerlessRepo::Application": ["LicenseUrl", "ReadmeUrl"]
+}
+
_RESOURCES_WITH_LOCAL_PATHS = {
"AWS::Serverless::Function": ["CodeUri"],
"AWS::Serverless::Api": ["DefinitionUri"],
@@ -132,6 +136,22 @@
"""
+ for resource_type, properties in template_dict.get("Metadata", {}).items():
+
+ if resource_type not in _METADATA_WITH_LOCAL_PATHS:
+ # Unknown resource. Skipping
+ continue
+
+ for path_prop_name in _METADATA_WITH_LOCAL_PATHS[resource_type]:
+ path = properties.get(path_prop_name)
+
+ updated_path = _resolve_relative_to(path, original_root, new_root)
+ if not updated_path:
+ # This path does not need to get updated
+ continue
+
+ properties[path_prop_name] = updated_path
+
for _, resource in template_dict.get("Resources", {}).items():
resource_type = resource.get("Type")
|
{"golden_diff": "diff --git a/samcli/commands/_utils/template.py b/samcli/commands/_utils/template.py\n--- a/samcli/commands/_utils/template.py\n+++ b/samcli/commands/_utils/template.py\n@@ -14,6 +14,10 @@\n from samcli.yamlhelper import yaml_parse, yaml_dump\n \n \n+_METADATA_WITH_LOCAL_PATHS = {\n+ \"AWS::ServerlessRepo::Application\": [\"LicenseUrl\", \"ReadmeUrl\"]\n+}\n+\n _RESOURCES_WITH_LOCAL_PATHS = {\n \"AWS::Serverless::Function\": [\"CodeUri\"],\n \"AWS::Serverless::Api\": [\"DefinitionUri\"],\n@@ -132,6 +136,22 @@\n \n \"\"\"\n \n+ for resource_type, properties in template_dict.get(\"Metadata\", {}).items():\n+\n+ if resource_type not in _METADATA_WITH_LOCAL_PATHS:\n+ # Unknown resource. Skipping\n+ continue\n+\n+ for path_prop_name in _METADATA_WITH_LOCAL_PATHS[resource_type]:\n+ path = properties.get(path_prop_name)\n+\n+ updated_path = _resolve_relative_to(path, original_root, new_root)\n+ if not updated_path:\n+ # This path does not need to get updated\n+ continue\n+\n+ properties[path_prop_name] = updated_path\n+\n for _, resource in template_dict.get(\"Resources\", {}).items():\n resource_type = resource.get(\"Type\")\n", "issue": "sam package of template with SAR metadata fails when using sam build\n<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed). \r\nIf you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. -->\r\n\r\n### Description\r\n\r\n`sam package` fails, when trying to package artifacts built by `sam build`, if the template contains SAR metadata and references local files for `LicenseUrl` or `ReadmeUrl` which should get uploaded by `sam package`. Without using `sam build` that works properly, as the files are present in the template directory.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n/tmp $ sam init\r\n2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/Grammar.txt\r\n2019-01-14 13:44:20 Generating grammar tables from /usr/lib/python3.7/lib2to3/PatternGrammar.txt\r\n[+] Initializing project structure...\r\n[SUCCESS] - Read sam-app/README.md for further instructions on how to proceed\r\n[*] Project initialization is now complete\r\n/tmp $ cd sam-app/\r\n```\r\n* Insert minimal SAR-meta data into the template:\r\n```\r\nMetadata:\r\n AWS::ServerlessRepo::Application:\r\n Name: hello-world \r\n Description: hello world\r\n Author: John\r\n SpdxLicenseId: MIT \r\n LicenseUrl: ./LICENSE \r\n SemanticVersion: 0.0.1\r\n```\r\n```\r\n/tmp/sam-app $ echo \"dummy license text\" > LICENSE\r\n/tmp/sam-app $ sam build --use-container\r\n2019-01-14 13:45:23 Starting Build inside a container\r\n2019-01-14 13:45:23 Found credentials in shared credentials file: ~/.aws/credentials\r\n2019-01-14 13:45:23 Building resource 'HelloWorldFunction'\r\n\r\nFetching lambci/lambda:build-nodejs8.10 Docker container image......\r\n2019-01-14 13:45:32 Mounting /tmp/sam-app/hello-world as /tmp/samcli/source:ro inside runtime container\r\n\r\nBuild Succeeded\r\n\r\nBuilt Artifacts : .aws-sam/build\r\nBuilt Template : .aws-sam/build/template.yaml\r\n\r\nCommands you can use next\r\n=========================\r\n[*] Invoke Function: sam local invoke\r\n[*] Package: sam package --s3-bucket <yourbucket>\r\n \r\n'nodejs' runtime has not been validated!\r\nRunning NodejsNpmBuilder:NpmPack\r\nRunning NodejsNpmBuilder:CopySource\r\nRunning NodejsNpmBuilder:NpmInstall\r\n/tmp/sam-app $ sam package --s3-bucket dummy\r\n\r\nUnable to upload artifact ./LICENSE referenced by 
LicenseUrl parameter of AWS::ServerlessRepo::Application resource.\r\nParameter LicenseUrl of resource AWS::ServerlessRepo::Application refers to a file or folder that does not exist /tmp/sam-app/.aws-sam/build/LICENSE\r\n```\r\n### Observed result\r\n\r\n`sam package` fails, because the `LICENSE` file isn't present in the build directory.\r\n\r\n### Expected result\r\n\r\n`sam package` succeeds.\r\n\r\n### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)\r\n\r\n1. OS: Debian/unstable\r\n2. `sam --version`: `SAM CLI, version 0.10.0`\n", "before_files": [{"content": "\"\"\"\nUtilities to manipulate template\n\"\"\"\n\nimport os\nimport six\nimport yaml\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\nfrom samcli.yamlhelper import yaml_parse, yaml_dump\n\n\n_RESOURCES_WITH_LOCAL_PATHS = {\n \"AWS::Serverless::Function\": [\"CodeUri\"],\n \"AWS::Serverless::Api\": [\"DefinitionUri\"],\n \"AWS::AppSync::GraphQLSchema\": [\"DefinitionS3Location\"],\n \"AWS::AppSync::Resolver\": [\"RequestMappingTemplateS3Location\", \"ResponseMappingTemplateS3Location\"],\n \"AWS::Lambda::Function\": [\"Code\"],\n \"AWS::ApiGateway::RestApi\": [\"BodyS3Location\"],\n \"AWS::ElasticBeanstalk::ApplicationVersion\": [\"SourceBundle\"],\n \"AWS::CloudFormation::Stack\": [\"TemplateURL\"],\n \"AWS::Serverless::Application\": [\"Location\"],\n \"AWS::Lambda::LayerVersion\": [\"Content\"],\n \"AWS::Serverless::LayerVersion\": [\"ContentUri\"]\n}\n\n\ndef get_template_data(template_file):\n \"\"\"\n Read the template file, parse it as JSON/YAML and return the template as a dictionary.\n\n Parameters\n ----------\n template_file : string\n Path to the template to read\n\n Returns\n -------\n Template data as a dictionary\n \"\"\"\n\n if not pathlib.Path(template_file).exists():\n raise ValueError(\"Template file not found at {}\".format(template_file))\n\n with open(template_file, 'r') as fp:\n try:\n return yaml_parse(fp.read())\n except (ValueError, yaml.YAMLError) as ex:\n raise ValueError(\"Failed to parse template: {}\".format(str(ex)))\n\n\ndef move_template(src_template_path,\n dest_template_path,\n template_dict):\n \"\"\"\n Move the SAM/CloudFormation template from ``src_template_path`` to ``dest_template_path``. For convenience, this\n method accepts a dictionary of template data ``template_dict`` that will be written to the destination instead of\n reading from the source file.\n\n SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.\n This path is always relative to the template's location. Before writing the template to ``dest_template_path`,\n we will update these paths to be relative to the new location.\n\n This methods updates resource properties supported by ``aws cloudformation package`` command:\n https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html\n\n You must use this method if you are reading a template from one location, modifying it, and writing it back to a\n different location.\n\n Parameters\n ----------\n src_template_path : str\n Path to the original location of the template\n\n dest_template_path : str\n Path to the destination location where updated template should be written to\n\n template_dict : dict\n Dictionary containing template contents. 
This dictionary will be updated & written to ``dest`` location.\n \"\"\"\n\n original_root = os.path.dirname(src_template_path)\n new_root = os.path.dirname(dest_template_path)\n\n # Next up, we will be writing the template to a different location. Before doing so, we should\n # update any relative paths in the template to be relative to the new location.\n modified_template = _update_relative_paths(template_dict,\n original_root,\n new_root)\n\n with open(dest_template_path, \"w\") as fp:\n fp.write(yaml_dump(modified_template))\n\n\ndef _update_relative_paths(template_dict,\n original_root,\n new_root):\n \"\"\"\n SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.\n This path is usually relative to the template's location. If the template is being moved from original location\n ``original_root`` to new location ``new_root``, use this method to update these paths to be\n relative to ``new_root``.\n\n After this method is complete, it is safe to write the template to ``new_root`` without\n breaking any relative paths.\n\n This methods updates resource properties supported by ``aws cloudformation package`` command:\n https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html\n\n If a property is either an absolute path or a S3 URI, this method will not update them.\n\n\n Parameters\n ----------\n template_dict : dict\n Dictionary containing template contents. This dictionary will be updated & written to ``dest`` location.\n\n original_root : str\n Path to the directory where all paths were originally set relative to. This is usually the directory\n containing the template originally\n\n new_root : str\n Path to the new directory that all paths set relative to after this method completes.\n\n Returns\n -------\n Updated dictionary\n\n \"\"\"\n\n for _, resource in template_dict.get(\"Resources\", {}).items():\n resource_type = resource.get(\"Type\")\n\n if resource_type not in _RESOURCES_WITH_LOCAL_PATHS:\n # Unknown resource. Skipping\n continue\n\n for path_prop_name in _RESOURCES_WITH_LOCAL_PATHS[resource_type]:\n properties = resource.get(\"Properties\", {})\n path = properties.get(path_prop_name)\n\n updated_path = _resolve_relative_to(path, original_root, new_root)\n if not updated_path:\n # This path does not need to get updated\n continue\n\n properties[path_prop_name] = updated_path\n\n # AWS::Includes can be anywhere within the template dictionary. Hence we need to recurse through the\n # dictionary in a separate method to find and update relative paths in there\n template_dict = _update_aws_include_relative_path(template_dict, original_root, new_root)\n\n return template_dict\n\n\ndef _update_aws_include_relative_path(template_dict, original_root, new_root):\n \"\"\"\n Update relative paths in \"AWS::Include\" directive. 
This directive can be present at any part of the template,\n and not just within resources.\n \"\"\"\n\n for key, val in template_dict.items():\n if key == \"Fn::Transform\":\n if isinstance(val, dict) and val.get(\"Name\") == \"AWS::Include\":\n path = val.get(\"Parameters\", {}).get(\"Location\", {})\n updated_path = _resolve_relative_to(path, original_root, new_root)\n if not updated_path:\n # This path does not need to get updated\n continue\n\n val[\"Parameters\"][\"Location\"] = updated_path\n\n # Recurse through all dictionary values\n elif isinstance(val, dict):\n _update_aws_include_relative_path(val, original_root, new_root)\n elif isinstance(val, list):\n for item in val:\n if isinstance(item, dict):\n _update_aws_include_relative_path(item, original_root, new_root)\n\n return template_dict\n\n\ndef _resolve_relative_to(path, original_root, new_root):\n \"\"\"\n If the given ``path`` is a relative path, then assume it is relative to ``original_root``. This method will\n update the path to be resolve it relative to ``new_root`` and return.\n\n Examples\n -------\n # Assume a file called template.txt at location /tmp/original/root/template.txt expressed as relative path\n # We are trying to update it to be relative to /tmp/new/root instead of the /tmp/original/root\n >>> result = _resolve_relative_to(\"template.txt\", \\\n \"/tmp/original/root\", \\\n \"/tmp/new/root\")\n >>> result\n ../../original/root/template.txt\n\n Returns\n -------\n Updated path if the given path is a relative path. None, if the path is not a relative path.\n \"\"\"\n\n if not isinstance(path, six.string_types) \\\n or path.startswith(\"s3://\") \\\n or os.path.isabs(path):\n # Value is definitely NOT a relative path. It is either a S3 URi or Absolute path or not a string at all\n return None\n\n # Value is definitely a relative path. 
Change it relative to the destination directory\n return os.path.relpath(\n os.path.normpath(os.path.join(original_root, path)), # Absolute original path w.r.t ``original_root``\n new_root) # Resolve the original path with respect to ``new_root``\n", "path": "samcli/commands/_utils/template.py"}], "after_files": [{"content": "\"\"\"\nUtilities to manipulate template\n\"\"\"\n\nimport os\nimport six\nimport yaml\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\nfrom samcli.yamlhelper import yaml_parse, yaml_dump\n\n\n_METADATA_WITH_LOCAL_PATHS = {\n \"AWS::ServerlessRepo::Application\": [\"LicenseUrl\", \"ReadmeUrl\"]\n}\n\n_RESOURCES_WITH_LOCAL_PATHS = {\n \"AWS::Serverless::Function\": [\"CodeUri\"],\n \"AWS::Serverless::Api\": [\"DefinitionUri\"],\n \"AWS::AppSync::GraphQLSchema\": [\"DefinitionS3Location\"],\n \"AWS::AppSync::Resolver\": [\"RequestMappingTemplateS3Location\", \"ResponseMappingTemplateS3Location\"],\n \"AWS::Lambda::Function\": [\"Code\"],\n \"AWS::ApiGateway::RestApi\": [\"BodyS3Location\"],\n \"AWS::ElasticBeanstalk::ApplicationVersion\": [\"SourceBundle\"],\n \"AWS::CloudFormation::Stack\": [\"TemplateURL\"],\n \"AWS::Serverless::Application\": [\"Location\"],\n \"AWS::Lambda::LayerVersion\": [\"Content\"],\n \"AWS::Serverless::LayerVersion\": [\"ContentUri\"]\n}\n\n\ndef get_template_data(template_file):\n \"\"\"\n Read the template file, parse it as JSON/YAML and return the template as a dictionary.\n\n Parameters\n ----------\n template_file : string\n Path to the template to read\n\n Returns\n -------\n Template data as a dictionary\n \"\"\"\n\n if not pathlib.Path(template_file).exists():\n raise ValueError(\"Template file not found at {}\".format(template_file))\n\n with open(template_file, 'r') as fp:\n try:\n return yaml_parse(fp.read())\n except (ValueError, yaml.YAMLError) as ex:\n raise ValueError(\"Failed to parse template: {}\".format(str(ex)))\n\n\ndef move_template(src_template_path,\n dest_template_path,\n template_dict):\n \"\"\"\n Move the SAM/CloudFormation template from ``src_template_path`` to ``dest_template_path``. For convenience, this\n method accepts a dictionary of template data ``template_dict`` that will be written to the destination instead of\n reading from the source file.\n\n SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.\n This path is always relative to the template's location. Before writing the template to ``dest_template_path`,\n we will update these paths to be relative to the new location.\n\n This methods updates resource properties supported by ``aws cloudformation package`` command:\n https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html\n\n You must use this method if you are reading a template from one location, modifying it, and writing it back to a\n different location.\n\n Parameters\n ----------\n src_template_path : str\n Path to the original location of the template\n\n dest_template_path : str\n Path to the destination location where updated template should be written to\n\n template_dict : dict\n Dictionary containing template contents. This dictionary will be updated & written to ``dest`` location.\n \"\"\"\n\n original_root = os.path.dirname(src_template_path)\n new_root = os.path.dirname(dest_template_path)\n\n # Next up, we will be writing the template to a different location. 
Before doing so, we should\n # update any relative paths in the template to be relative to the new location.\n modified_template = _update_relative_paths(template_dict,\n original_root,\n new_root)\n\n with open(dest_template_path, \"w\") as fp:\n fp.write(yaml_dump(modified_template))\n\n\ndef _update_relative_paths(template_dict,\n original_root,\n new_root):\n \"\"\"\n SAM/CloudFormation template can contain certain properties whose value is a relative path to a local file/folder.\n This path is usually relative to the template's location. If the template is being moved from original location\n ``original_root`` to new location ``new_root``, use this method to update these paths to be\n relative to ``new_root``.\n\n After this method is complete, it is safe to write the template to ``new_root`` without\n breaking any relative paths.\n\n This methods updates resource properties supported by ``aws cloudformation package`` command:\n https://docs.aws.amazon.com/cli/latest/reference/cloudformation/package.html\n\n If a property is either an absolute path or a S3 URI, this method will not update them.\n\n\n Parameters\n ----------\n template_dict : dict\n Dictionary containing template contents. This dictionary will be updated & written to ``dest`` location.\n\n original_root : str\n Path to the directory where all paths were originally set relative to. This is usually the directory\n containing the template originally\n\n new_root : str\n Path to the new directory that all paths set relative to after this method completes.\n\n Returns\n -------\n Updated dictionary\n\n \"\"\"\n\n for resource_type, properties in template_dict.get(\"Metadata\", {}).items():\n\n if resource_type not in _METADATA_WITH_LOCAL_PATHS:\n # Unknown resource. Skipping\n continue\n\n for path_prop_name in _METADATA_WITH_LOCAL_PATHS[resource_type]:\n path = properties.get(path_prop_name)\n\n updated_path = _resolve_relative_to(path, original_root, new_root)\n if not updated_path:\n # This path does not need to get updated\n continue\n\n properties[path_prop_name] = updated_path\n\n for _, resource in template_dict.get(\"Resources\", {}).items():\n resource_type = resource.get(\"Type\")\n\n if resource_type not in _RESOURCES_WITH_LOCAL_PATHS:\n # Unknown resource. Skipping\n continue\n\n for path_prop_name in _RESOURCES_WITH_LOCAL_PATHS[resource_type]:\n properties = resource.get(\"Properties\", {})\n path = properties.get(path_prop_name)\n\n updated_path = _resolve_relative_to(path, original_root, new_root)\n if not updated_path:\n # This path does not need to get updated\n continue\n\n properties[path_prop_name] = updated_path\n\n # AWS::Includes can be anywhere within the template dictionary. Hence we need to recurse through the\n # dictionary in a separate method to find and update relative paths in there\n template_dict = _update_aws_include_relative_path(template_dict, original_root, new_root)\n\n return template_dict\n\n\ndef _update_aws_include_relative_path(template_dict, original_root, new_root):\n \"\"\"\n Update relative paths in \"AWS::Include\" directive. 
This directive can be present at any part of the template,\n and not just within resources.\n \"\"\"\n\n for key, val in template_dict.items():\n if key == \"Fn::Transform\":\n if isinstance(val, dict) and val.get(\"Name\") == \"AWS::Include\":\n path = val.get(\"Parameters\", {}).get(\"Location\", {})\n updated_path = _resolve_relative_to(path, original_root, new_root)\n if not updated_path:\n # This path does not need to get updated\n continue\n\n val[\"Parameters\"][\"Location\"] = updated_path\n\n # Recurse through all dictionary values\n elif isinstance(val, dict):\n _update_aws_include_relative_path(val, original_root, new_root)\n elif isinstance(val, list):\n for item in val:\n if isinstance(item, dict):\n _update_aws_include_relative_path(item, original_root, new_root)\n\n return template_dict\n\n\ndef _resolve_relative_to(path, original_root, new_root):\n \"\"\"\n If the given ``path`` is a relative path, then assume it is relative to ``original_root``. This method will\n update the path to be resolve it relative to ``new_root`` and return.\n\n Examples\n -------\n # Assume a file called template.txt at location /tmp/original/root/template.txt expressed as relative path\n # We are trying to update it to be relative to /tmp/new/root instead of the /tmp/original/root\n >>> result = _resolve_relative_to(\"template.txt\", \\\n \"/tmp/original/root\", \\\n \"/tmp/new/root\")\n >>> result\n ../../original/root/template.txt\n\n Returns\n -------\n Updated path if the given path is a relative path. None, if the path is not a relative path.\n \"\"\"\n\n if not isinstance(path, six.string_types) \\\n or path.startswith(\"s3://\") \\\n or os.path.isabs(path):\n # Value is definitely NOT a relative path. It is either a S3 URi or Absolute path or not a string at all\n return None\n\n # Value is definitely a relative path. Change it relative to the destination directory\n return os.path.relpath(\n os.path.normpath(os.path.join(original_root, path)), # Absolute original path w.r.t ``original_root``\n new_root) # Resolve the original path with respect to ``new_root``\n", "path": "samcli/commands/_utils/template.py"}]}
| 3,314 | 311 |
gh_patches_debug_32931 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-109 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make categorical_input/numerical_input optional for TabularData.from_df
## 🐛 Bug
If I have only numerical features then I still have to pass an empty list for the categorical_input. It can be optional.
### To Reproduce
Steps to reproduce the behavior:
1. df = pd.DataFrame({'digit': [1,2,3], 'odd_even':[0,1,0]})
2. datamodule = TabularData.from_df(df, 'odd_even',
numerical_input=['digit'],
)
```
TypeError Traceback (most recent call last)
<ipython-input-122-405a8bb49976> in <module>
1 datamodule = TabularData.from_df(final_data, 'target',
----> 2 numerical_input=train_x.columns.tolist(),
3 # categorical_input=[],
4 )
TypeError: from_df() missing 1 required positional argument: 'categorical_input'
```
#### Code sample
```
df = pd.DataFrame({'digit': [1,2,3], 'odd_even':[0,1,0]})
datamodule = TabularData.from_df(df, 'odd_even',
numerical_input=['digit'],
)
```
### Expected behaviour
If only one of categorical or numerical input is passed then users should not be forced to enter an empty list.
### Environment
- PyTorch Version (e.g., 1.0): '1.7.1'
- OS (e.g., Linux): MacOS
- How you installed PyTorch (`conda`, `pip`, source): pip
- Python version:Python 3.7.9
> I would love to start my contribution to Flash by fixing this issue.
--- END ISSUE ---
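A minimal sketch of the behaviour the issue asks for, assuming one possible normalization strategy: default both column lists to `None`, reject the case where both are missing, and fall back to empty lists otherwise. `normalize_inputs` is an illustrative helper, not the library's API, and the project's eventual signature may differ.

```
from typing import List, Optional, Tuple


def normalize_inputs(
    categorical_input: Optional[List[str]] = None,
    numerical_input: Optional[List[str]] = None,
) -> Tuple[List[str], List[str]]:
    # Reject the degenerate case, then treat a missing list as empty.
    if categorical_input is None and numerical_input is None:
        raise ValueError("Pass at least one of categorical_input or numerical_input.")
    return categorical_input or [], numerical_input or []


cat, num = normalize_inputs(numerical_input=["digit"])
print(cat, num)  # [] ['digit']
```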
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/tabular/classification/data/data.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, List, Optional
15
16 import numpy as np
17 import pandas as pd
18 from pandas.core.frame import DataFrame
19 from sklearn.model_selection import train_test_split
20 from torch import Tensor
21
22 from flash.core.classification import ClassificationDataPipeline
23 from flash.core.data import DataPipeline
24 from flash.core.data.datamodule import DataModule
25 from flash.core.data.utils import _contains_any_tensor
26 from flash.tabular.classification.data.dataset import (
27 _compute_normalization,
28 _dfs_to_samples,
29 _generate_codes,
30 _impute,
31 _pre_transform,
32 PandasDataset,
33 )
34
35
36 class TabularDataPipeline(ClassificationDataPipeline):
37
38 def __init__(
39 self,
40 categorical_input: List,
41 numerical_input: List,
42 target: str,
43 mean: DataFrame,
44 std: DataFrame,
45 codes: Dict,
46 ):
47 self._categorical_input = categorical_input
48 self._numerical_input = numerical_input
49 self._target = target
50 self._mean = mean
51 self._std = std
52 self._codes = codes
53
54 def before_collate(self, samples: Any) -> Any:
55 """Override to apply transformations to samples"""
56 if _contains_any_tensor(samples, dtype=(Tensor, np.ndarray)):
57 return samples
58 if isinstance(samples, str):
59 samples = pd.read_csv(samples)
60 if isinstance(samples, DataFrame):
61 samples = [samples]
62 dfs = _pre_transform(
63 samples, self._numerical_input, self._categorical_input, self._codes, self._mean, self._std
64 )
65 return _dfs_to_samples(dfs, self._categorical_input, self._numerical_input)
66
67
68 class TabularData(DataModule):
69 """Data module for tabular tasks"""
70
71 def __init__(
72 self,
73 train_df: DataFrame,
74 categorical_input: List,
75 numerical_input: List,
76 target: str,
77 valid_df: Optional[DataFrame] = None,
78 test_df: Optional[DataFrame] = None,
79 batch_size: int = 2,
80 num_workers: Optional[int] = None,
81 ):
82 dfs = [train_df]
83 self._test_df = None
84
85 if valid_df is not None:
86 dfs.append(valid_df)
87
88 if test_df is not None:
89 # save for predict function
90 self._test_df = test_df.copy()
91 self._test_df.drop(target, axis=1)
92 dfs.append(test_df)
93
94 # impute missing values
95 dfs = _impute(dfs, numerical_input)
96
97 # compute train dataset stats
98 self.mean, self.std = _compute_normalization(dfs[0], numerical_input)
99
100 if dfs[0][target].dtype == object:
101 # if the target is a category, not an int
102 self.target_codes = _generate_codes(dfs, [target])
103 else:
104 self.target_codes = None
105
106 self.codes = _generate_codes(dfs, categorical_input)
107
108 dfs = _pre_transform(
109 dfs, numerical_input, categorical_input, self.codes, self.mean, self.std, target, self.target_codes
110 )
111
112 # normalize
113 self.cat_cols = categorical_input
114 self.num_cols = numerical_input
115
116 self._num_classes = len(train_df[target].unique())
117
118 train_ds = PandasDataset(dfs[0], categorical_input, numerical_input, target)
119 valid_ds = PandasDataset(dfs[1], categorical_input, numerical_input, target) if valid_df is not None else None
120 test_ds = PandasDataset(dfs[-1], categorical_input, numerical_input, target) if test_df is not None else None
121 super().__init__(train_ds, valid_ds, test_ds, batch_size=batch_size, num_workers=num_workers)
122
123 @property
124 def num_classes(self) -> int:
125 return self._num_classes
126
127 @property
128 def num_features(self) -> int:
129 return len(self.cat_cols) + len(self.num_cols)
130
131 @classmethod
132 def from_df(
133 cls,
134 train_df: DataFrame,
135 target: str,
136 categorical_input: List,
137 numerical_input: List,
138 valid_df: Optional[DataFrame] = None,
139 test_df: Optional[DataFrame] = None,
140 batch_size: int = 8,
141 num_workers: Optional[int] = None,
142 val_size: float = None,
143 test_size: float = None,
144 ):
145 """Creates a TabularData object from pandas DataFrames.
146
147 Args:
148 train_df: train data DataFrame
149 target: The column containing the class id.
150 categorical_input: The list of categorical columns.
151 numerical_input: The list of numerical columns.
152 valid_df: validation data DataFrame
153 test_df: test data DataFrame
154 batch_size: the batchsize to use for parallel loading. Defaults to 64.
155 num_workers: The number of workers to use for parallelized loading.
156 Defaults to None which equals the number of available CPU threads.
157 val_size: float between 0 and 1 to create a validation dataset from train dataset
158 test_size: float between 0 and 1 to create a test dataset from train validation
159
160 Returns:
161 TabularData: The constructed data module.
162
163 Examples::
164
165 text_data = TextClassificationData.from_files("train.csv", label_field="class", text_field="sentence")
166 """
167 if valid_df is None and isinstance(val_size, float) and isinstance(test_size, float):
168 assert 0 < val_size and val_size < 1
169 assert 0 < test_size and test_size < 1
170 train_df, valid_df = train_test_split(train_df, test_size=(val_size + test_size))
171
172 if test_df is None and isinstance(test_size, float):
173 assert 0 < test_size and test_size < 1
174 valid_df, test_df = train_test_split(valid_df, test_size=test_size)
175
176 datamodule = cls(
177 train_df=train_df,
178 target=target,
179 categorical_input=categorical_input,
180 numerical_input=numerical_input,
181 valid_df=valid_df,
182 test_df=test_df,
183 batch_size=batch_size,
184 num_workers=num_workers,
185 )
186 datamodule.data_pipeline = TabularDataPipeline(
187 categorical_input, numerical_input, target, datamodule.mean, datamodule.std, datamodule.codes
188 )
189
190 return datamodule
191
192 @classmethod
193 def from_csv(
194 cls,
195 train_csv: str,
196 target: str,
197 categorical_input: List,
198 numerical_input: List,
199 valid_csv: Optional[str] = None,
200 test_csv: Optional[str] = None,
201 batch_size: int = 8,
202 num_workers: Optional[int] = None,
203 val_size: Optional[float] = None,
204 test_size: Optional[float] = None,
205 **pandas_kwargs,
206 ):
207 """Creates a TextClassificationData object from pandas DataFrames.
208
209 Args:
210 train_csv: train data csv file.
211 target: The column containing the class id.
212 categorical_input: The list of categorical columns.
213 numerical_input: The list of numerical columns.
214 valid_csv: validation data csv file.
215 test_csv: test data csv file.
216 batch_size: the batchsize to use for parallel loading. Defaults to 64.
217 num_workers: The number of workers to use for parallelized loading.
218 Defaults to None which equals the number of available CPU threads.
219 val_size: float between 0 and 1 to create a validation dataset from train dataset
220 test_size: float between 0 and 1 to create a test dataset from train validation
221
222 Returns:
223 TabularData: The constructed data module.
224
225 Examples::
226
227 text_data = TabularData.from_files("train.csv", label_field="class", text_field="sentence")
228 """
229 train_df = pd.read_csv(train_csv, **pandas_kwargs)
230 valid_df = pd.read_csv(valid_csv, **pandas_kwargs) if valid_csv is not None else None
231 test_df = pd.read_csv(test_csv, **pandas_kwargs) if test_csv is not None else None
232 datamodule = cls.from_df(
233 train_df, target, categorical_input, numerical_input, valid_df, test_df, batch_size, num_workers, val_size,
234 test_size
235 )
236 return datamodule
237
238 @property
239 def emb_sizes(self) -> list:
240 """Recommended embedding sizes."""
241
242 # https://developers.googleblog.com/2017/11/introducing-tensorflow-feature-columns.html
243 # The following "formula" provides a general rule of thumb about the number of embedding dimensions:
244 # embedding_dimensions = number_of_categories**0.25
245
246 num_classes = [len(self.codes[cat]) for cat in self.cat_cols]
247 emb_dims = [max(int(n**0.25), 16) for n in num_classes]
248 return list(zip(num_classes, emb_dims))
249
250 @staticmethod
251 def default_pipeline() -> DataPipeline():
252 # TabularDataPipeline depends on the data
253 return DataPipeline()
254
```
--- END FILES ---
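For the embedding-size rule of thumb quoted in `emb_sizes` above (dimension roughly `number_of_categories ** 0.25`, floored at 16 in the snippet), here is a small worked example. It assumes `codes` maps each categorical column to its list of category codes, as in the file above; the standalone function is illustrative, not the library's implementation.

```
from typing import Dict, List, Tuple


def emb_sizes(codes: Dict[str, list], cat_cols: List[str]) -> List[Tuple[int, int]]:
    # embedding dimension ~ number_of_categories ** 0.25, floored at 16,
    # mirroring the property shown in the file above.
    num_classes = [len(codes[col]) for col in cat_cols]
    emb_dims = [max(int(n ** 0.25), 16) for n in num_classes]
    return list(zip(num_classes, emb_dims))


print(emb_sizes({"color": ["red", "green", "blue"]}, ["color"]))  # [(3, 16)]
```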
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/tabular/classification/data/data.py b/flash/tabular/classification/data/data.py
--- a/flash/tabular/classification/data/data.py
+++ b/flash/tabular/classification/data/data.py
@@ -71,9 +71,9 @@
def __init__(
self,
train_df: DataFrame,
- categorical_input: List,
- numerical_input: List,
target: str,
+ categorical_input: Optional[List] = None,
+ numerical_input: Optional[List] = None,
valid_df: Optional[DataFrame] = None,
test_df: Optional[DataFrame] = None,
batch_size: int = 2,
@@ -82,6 +82,12 @@
dfs = [train_df]
self._test_df = None
+ if categorical_input is None and numerical_input is None:
+ raise RuntimeError('Both `categorical_input` and `numerical_input` are None!')
+
+ categorical_input = categorical_input if categorical_input is not None else []
+ numerical_input = numerical_input if numerical_input is not None else []
+
if valid_df is not None:
dfs.append(valid_df)
@@ -133,8 +139,8 @@
cls,
train_df: DataFrame,
target: str,
- categorical_input: List,
- numerical_input: List,
+ categorical_input: Optional[List] = None,
+ numerical_input: Optional[List] = None,
valid_df: Optional[DataFrame] = None,
test_df: Optional[DataFrame] = None,
batch_size: int = 8,
@@ -194,8 +200,8 @@
cls,
train_csv: str,
target: str,
- categorical_input: List,
- numerical_input: List,
+ categorical_input: Optional[List] = None,
+ numerical_input: Optional[List] = None,
valid_csv: Optional[str] = None,
test_csv: Optional[str] = None,
batch_size: int = 8,
|
{"golden_diff": "diff --git a/flash/tabular/classification/data/data.py b/flash/tabular/classification/data/data.py\n--- a/flash/tabular/classification/data/data.py\n+++ b/flash/tabular/classification/data/data.py\n@@ -71,9 +71,9 @@\n def __init__(\n self,\n train_df: DataFrame,\n- categorical_input: List,\n- numerical_input: List,\n target: str,\n+ categorical_input: Optional[List] = None,\n+ numerical_input: Optional[List] = None,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 2,\n@@ -82,6 +82,12 @@\n dfs = [train_df]\n self._test_df = None\n \n+ if categorical_input is None and numerical_input is None:\n+ raise RuntimeError('Both `categorical_input` and `numerical_input` are None!')\n+\n+ categorical_input = categorical_input if categorical_input is not None else []\n+ numerical_input = numerical_input if numerical_input is not None else []\n+\n if valid_df is not None:\n dfs.append(valid_df)\n \n@@ -133,8 +139,8 @@\n cls,\n train_df: DataFrame,\n target: str,\n- categorical_input: List,\n- numerical_input: List,\n+ categorical_input: Optional[List] = None,\n+ numerical_input: Optional[List] = None,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 8,\n@@ -194,8 +200,8 @@\n cls,\n train_csv: str,\n target: str,\n- categorical_input: List,\n- numerical_input: List,\n+ categorical_input: Optional[List] = None,\n+ numerical_input: Optional[List] = None,\n valid_csv: Optional[str] = None,\n test_csv: Optional[str] = None,\n batch_size: int = 8,\n", "issue": "Make categorical_input/numerical_input optional for TabularData.from_df\n## \ud83d\udc1b Bug\r\nIf I have only numerical features then I still have to pass an empty list for the categorical_input. It can be optional.\r\n\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. df = pd.DataFrame({'digit': [1,2,3], 'odd_even':[0,1,0]})\r\n2. 
datamodule = TabularData.from_df(df, 'odd_even', \r\n numerical_input=['digit'],\r\n )\r\n\r\n```\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-122-405a8bb49976> in <module>\r\n 1 datamodule = TabularData.from_df(final_data, 'target', \r\n----> 2 numerical_input=train_x.columns.tolist(),\r\n 3 # categorical_input=[],\r\n 4 )\r\n\r\nTypeError: from_df() missing 1 required positional argument: 'categorical_input'\r\n```\r\n\r\n#### Code sample\r\n```\r\ndf = pd.DataFrame({'digit': [1,2,3], 'odd_even':[0,1,0]})\r\ndatamodule = TabularData.from_df(df, 'odd_even', \r\n numerical_input=['digit'],\r\n )\r\n```\r\n\r\n### Expected behaviour\r\nIf only one of categorical or numerical input is passed then users should not be forced to enter an empty list.\r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0): '1.7.1'\r\n - OS (e.g., Linux): MacOS\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Python version:Python 3.7.9\r\n\r\n\r\n> I would love to start my contribution to Flash by fixing this issue.\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, List, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\nfrom sklearn.model_selection import train_test_split\nfrom torch import Tensor\n\nfrom flash.core.classification import ClassificationDataPipeline\nfrom flash.core.data import DataPipeline\nfrom flash.core.data.datamodule import DataModule\nfrom flash.core.data.utils import _contains_any_tensor\nfrom flash.tabular.classification.data.dataset import (\n _compute_normalization,\n _dfs_to_samples,\n _generate_codes,\n _impute,\n _pre_transform,\n PandasDataset,\n)\n\n\nclass TabularDataPipeline(ClassificationDataPipeline):\n\n def __init__(\n self,\n categorical_input: List,\n numerical_input: List,\n target: str,\n mean: DataFrame,\n std: DataFrame,\n codes: Dict,\n ):\n self._categorical_input = categorical_input\n self._numerical_input = numerical_input\n self._target = target\n self._mean = mean\n self._std = std\n self._codes = codes\n\n def before_collate(self, samples: Any) -> Any:\n \"\"\"Override to apply transformations to samples\"\"\"\n if _contains_any_tensor(samples, dtype=(Tensor, np.ndarray)):\n return samples\n if isinstance(samples, str):\n samples = pd.read_csv(samples)\n if isinstance(samples, DataFrame):\n samples = [samples]\n dfs = _pre_transform(\n samples, self._numerical_input, self._categorical_input, self._codes, self._mean, self._std\n )\n return _dfs_to_samples(dfs, self._categorical_input, self._numerical_input)\n\n\nclass TabularData(DataModule):\n \"\"\"Data module for tabular tasks\"\"\"\n\n def __init__(\n self,\n train_df: DataFrame,\n categorical_input: List,\n numerical_input: List,\n target: str,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 2,\n num_workers: Optional[int] = None,\n ):\n dfs = [train_df]\n self._test_df = 
None\n\n if valid_df is not None:\n dfs.append(valid_df)\n\n if test_df is not None:\n # save for predict function\n self._test_df = test_df.copy()\n self._test_df.drop(target, axis=1)\n dfs.append(test_df)\n\n # impute missing values\n dfs = _impute(dfs, numerical_input)\n\n # compute train dataset stats\n self.mean, self.std = _compute_normalization(dfs[0], numerical_input)\n\n if dfs[0][target].dtype == object:\n # if the target is a category, not an int\n self.target_codes = _generate_codes(dfs, [target])\n else:\n self.target_codes = None\n\n self.codes = _generate_codes(dfs, categorical_input)\n\n dfs = _pre_transform(\n dfs, numerical_input, categorical_input, self.codes, self.mean, self.std, target, self.target_codes\n )\n\n # normalize\n self.cat_cols = categorical_input\n self.num_cols = numerical_input\n\n self._num_classes = len(train_df[target].unique())\n\n train_ds = PandasDataset(dfs[0], categorical_input, numerical_input, target)\n valid_ds = PandasDataset(dfs[1], categorical_input, numerical_input, target) if valid_df is not None else None\n test_ds = PandasDataset(dfs[-1], categorical_input, numerical_input, target) if test_df is not None else None\n super().__init__(train_ds, valid_ds, test_ds, batch_size=batch_size, num_workers=num_workers)\n\n @property\n def num_classes(self) -> int:\n return self._num_classes\n\n @property\n def num_features(self) -> int:\n return len(self.cat_cols) + len(self.num_cols)\n\n @classmethod\n def from_df(\n cls,\n train_df: DataFrame,\n target: str,\n categorical_input: List,\n numerical_input: List,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 8,\n num_workers: Optional[int] = None,\n val_size: float = None,\n test_size: float = None,\n ):\n \"\"\"Creates a TabularData object from pandas DataFrames.\n\n Args:\n train_df: train data DataFrame\n target: The column containing the class id.\n categorical_input: The list of categorical columns.\n numerical_input: The list of numerical columns.\n valid_df: validation data DataFrame\n test_df: test data DataFrame\n batch_size: the batchsize to use for parallel loading. 
Defaults to 64.\n num_workers: The number of workers to use for parallelized loading.\n Defaults to None which equals the number of available CPU threads.\n val_size: float between 0 and 1 to create a validation dataset from train dataset\n test_size: float between 0 and 1 to create a test dataset from train validation\n\n Returns:\n TabularData: The constructed data module.\n\n Examples::\n\n text_data = TextClassificationData.from_files(\"train.csv\", label_field=\"class\", text_field=\"sentence\")\n \"\"\"\n if valid_df is None and isinstance(val_size, float) and isinstance(test_size, float):\n assert 0 < val_size and val_size < 1\n assert 0 < test_size and test_size < 1\n train_df, valid_df = train_test_split(train_df, test_size=(val_size + test_size))\n\n if test_df is None and isinstance(test_size, float):\n assert 0 < test_size and test_size < 1\n valid_df, test_df = train_test_split(valid_df, test_size=test_size)\n\n datamodule = cls(\n train_df=train_df,\n target=target,\n categorical_input=categorical_input,\n numerical_input=numerical_input,\n valid_df=valid_df,\n test_df=test_df,\n batch_size=batch_size,\n num_workers=num_workers,\n )\n datamodule.data_pipeline = TabularDataPipeline(\n categorical_input, numerical_input, target, datamodule.mean, datamodule.std, datamodule.codes\n )\n\n return datamodule\n\n @classmethod\n def from_csv(\n cls,\n train_csv: str,\n target: str,\n categorical_input: List,\n numerical_input: List,\n valid_csv: Optional[str] = None,\n test_csv: Optional[str] = None,\n batch_size: int = 8,\n num_workers: Optional[int] = None,\n val_size: Optional[float] = None,\n test_size: Optional[float] = None,\n **pandas_kwargs,\n ):\n \"\"\"Creates a TextClassificationData object from pandas DataFrames.\n\n Args:\n train_csv: train data csv file.\n target: The column containing the class id.\n categorical_input: The list of categorical columns.\n numerical_input: The list of numerical columns.\n valid_csv: validation data csv file.\n test_csv: test data csv file.\n batch_size: the batchsize to use for parallel loading. 
Defaults to 64.\n num_workers: The number of workers to use for parallelized loading.\n Defaults to None which equals the number of available CPU threads.\n val_size: float between 0 and 1 to create a validation dataset from train dataset\n test_size: float between 0 and 1 to create a test dataset from train validation\n\n Returns:\n TabularData: The constructed data module.\n\n Examples::\n\n text_data = TabularData.from_files(\"train.csv\", label_field=\"class\", text_field=\"sentence\")\n \"\"\"\n train_df = pd.read_csv(train_csv, **pandas_kwargs)\n valid_df = pd.read_csv(valid_csv, **pandas_kwargs) if valid_csv is not None else None\n test_df = pd.read_csv(test_csv, **pandas_kwargs) if test_csv is not None else None\n datamodule = cls.from_df(\n train_df, target, categorical_input, numerical_input, valid_df, test_df, batch_size, num_workers, val_size,\n test_size\n )\n return datamodule\n\n @property\n def emb_sizes(self) -> list:\n \"\"\"Recommended embedding sizes.\"\"\"\n\n # https://developers.googleblog.com/2017/11/introducing-tensorflow-feature-columns.html\n # The following \"formula\" provides a general rule of thumb about the number of embedding dimensions:\n # embedding_dimensions = number_of_categories**0.25\n\n num_classes = [len(self.codes[cat]) for cat in self.cat_cols]\n emb_dims = [max(int(n**0.25), 16) for n in num_classes]\n return list(zip(num_classes, emb_dims))\n\n @staticmethod\n def default_pipeline() -> DataPipeline():\n # TabularDataPipeline depends on the data\n return DataPipeline()\n", "path": "flash/tabular/classification/data/data.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, List, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\nfrom sklearn.model_selection import train_test_split\nfrom torch import Tensor\n\nfrom flash.core.classification import ClassificationDataPipeline\nfrom flash.core.data import DataPipeline\nfrom flash.core.data.datamodule import DataModule\nfrom flash.core.data.utils import _contains_any_tensor\nfrom flash.tabular.classification.data.dataset import (\n _compute_normalization,\n _dfs_to_samples,\n _generate_codes,\n _impute,\n _pre_transform,\n PandasDataset,\n)\n\n\nclass TabularDataPipeline(ClassificationDataPipeline):\n\n def __init__(\n self,\n categorical_input: List,\n numerical_input: List,\n target: str,\n mean: DataFrame,\n std: DataFrame,\n codes: Dict,\n ):\n self._categorical_input = categorical_input\n self._numerical_input = numerical_input\n self._target = target\n self._mean = mean\n self._std = std\n self._codes = codes\n\n def before_collate(self, samples: Any) -> Any:\n \"\"\"Override to apply transformations to samples\"\"\"\n if _contains_any_tensor(samples, dtype=(Tensor, np.ndarray)):\n return samples\n if isinstance(samples, str):\n samples = pd.read_csv(samples)\n if isinstance(samples, DataFrame):\n samples = [samples]\n dfs = _pre_transform(\n samples, 
self._numerical_input, self._categorical_input, self._codes, self._mean, self._std\n )\n return _dfs_to_samples(dfs, self._categorical_input, self._numerical_input)\n\n\nclass TabularData(DataModule):\n \"\"\"Data module for tabular tasks\"\"\"\n\n def __init__(\n self,\n train_df: DataFrame,\n target: str,\n categorical_input: Optional[List] = None,\n numerical_input: Optional[List] = None,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 2,\n num_workers: Optional[int] = None,\n ):\n dfs = [train_df]\n self._test_df = None\n\n if categorical_input is None and numerical_input is None:\n raise RuntimeError('Both `categorical_input` and `numerical_input` are None!')\n\n categorical_input = categorical_input if categorical_input is not None else []\n numerical_input = numerical_input if numerical_input is not None else []\n\n if valid_df is not None:\n dfs.append(valid_df)\n\n if test_df is not None:\n # save for predict function\n self._test_df = test_df.copy()\n self._test_df.drop(target, axis=1)\n dfs.append(test_df)\n\n # impute missing values\n dfs = _impute(dfs, numerical_input)\n\n # compute train dataset stats\n self.mean, self.std = _compute_normalization(dfs[0], numerical_input)\n\n if dfs[0][target].dtype == object:\n # if the target is a category, not an int\n self.target_codes = _generate_codes(dfs, [target])\n else:\n self.target_codes = None\n\n self.codes = _generate_codes(dfs, categorical_input)\n\n dfs = _pre_transform(\n dfs, numerical_input, categorical_input, self.codes, self.mean, self.std, target, self.target_codes\n )\n\n # normalize\n self.cat_cols = categorical_input\n self.num_cols = numerical_input\n\n self._num_classes = len(train_df[target].unique())\n\n train_ds = PandasDataset(dfs[0], categorical_input, numerical_input, target)\n valid_ds = PandasDataset(dfs[1], categorical_input, numerical_input, target) if valid_df is not None else None\n test_ds = PandasDataset(dfs[-1], categorical_input, numerical_input, target) if test_df is not None else None\n super().__init__(train_ds, valid_ds, test_ds, batch_size=batch_size, num_workers=num_workers)\n\n @property\n def num_classes(self) -> int:\n return self._num_classes\n\n @property\n def num_features(self) -> int:\n return len(self.cat_cols) + len(self.num_cols)\n\n @classmethod\n def from_df(\n cls,\n train_df: DataFrame,\n target: str,\n categorical_input: Optional[List] = None,\n numerical_input: Optional[List] = None,\n valid_df: Optional[DataFrame] = None,\n test_df: Optional[DataFrame] = None,\n batch_size: int = 8,\n num_workers: Optional[int] = None,\n val_size: float = None,\n test_size: float = None,\n ):\n \"\"\"Creates a TabularData object from pandas DataFrames.\n\n Args:\n train_df: train data DataFrame\n target: The column containing the class id.\n categorical_input: The list of categorical columns.\n numerical_input: The list of numerical columns.\n valid_df: validation data DataFrame\n test_df: test data DataFrame\n batch_size: the batchsize to use for parallel loading. 
Defaults to 64.\n num_workers: The number of workers to use for parallelized loading.\n Defaults to None which equals the number of available CPU threads.\n val_size: float between 0 and 1 to create a validation dataset from train dataset\n test_size: float between 0 and 1 to create a test dataset from train validation\n\n Returns:\n TabularData: The constructed data module.\n\n Examples::\n\n text_data = TextClassificationData.from_files(\"train.csv\", label_field=\"class\", text_field=\"sentence\")\n \"\"\"\n if valid_df is None and isinstance(val_size, float) and isinstance(test_size, float):\n assert 0 < val_size and val_size < 1\n assert 0 < test_size and test_size < 1\n train_df, valid_df = train_test_split(train_df, test_size=(val_size + test_size))\n\n if test_df is None and isinstance(test_size, float):\n assert 0 < test_size and test_size < 1\n valid_df, test_df = train_test_split(valid_df, test_size=test_size)\n\n datamodule = cls(\n train_df=train_df,\n target=target,\n categorical_input=categorical_input,\n numerical_input=numerical_input,\n valid_df=valid_df,\n test_df=test_df,\n batch_size=batch_size,\n num_workers=num_workers,\n )\n datamodule.data_pipeline = TabularDataPipeline(\n categorical_input, numerical_input, target, datamodule.mean, datamodule.std, datamodule.codes\n )\n\n return datamodule\n\n @classmethod\n def from_csv(\n cls,\n train_csv: str,\n target: str,\n categorical_input: Optional[List] = None,\n numerical_input: Optional[List] = None,\n valid_csv: Optional[str] = None,\n test_csv: Optional[str] = None,\n batch_size: int = 8,\n num_workers: Optional[int] = None,\n val_size: Optional[float] = None,\n test_size: Optional[float] = None,\n **pandas_kwargs,\n ):\n \"\"\"Creates a TextClassificationData object from pandas DataFrames.\n\n Args:\n train_csv: train data csv file.\n target: The column containing the class id.\n categorical_input: The list of categorical columns.\n numerical_input: The list of numerical columns.\n valid_csv: validation data csv file.\n test_csv: test data csv file.\n batch_size: the batchsize to use for parallel loading. 
Defaults to 64.\n num_workers: The number of workers to use for parallelized loading.\n Defaults to None which equals the number of available CPU threads.\n val_size: float between 0 and 1 to create a validation dataset from train dataset\n test_size: float between 0 and 1 to create a test dataset from train validation\n\n Returns:\n TabularData: The constructed data module.\n\n Examples::\n\n text_data = TabularData.from_files(\"train.csv\", label_field=\"class\", text_field=\"sentence\")\n \"\"\"\n train_df = pd.read_csv(train_csv, **pandas_kwargs)\n valid_df = pd.read_csv(valid_csv, **pandas_kwargs) if valid_csv is not None else None\n test_df = pd.read_csv(test_csv, **pandas_kwargs) if test_csv is not None else None\n datamodule = cls.from_df(\n train_df, target, categorical_input, numerical_input, valid_df, test_df, batch_size, num_workers, val_size,\n test_size\n )\n return datamodule\n\n @property\n def emb_sizes(self) -> list:\n \"\"\"Recommended embedding sizes.\"\"\"\n\n # https://developers.googleblog.com/2017/11/introducing-tensorflow-feature-columns.html\n # The following \"formula\" provides a general rule of thumb about the number of embedding dimensions:\n # embedding_dimensions = number_of_categories**0.25\n\n num_classes = [len(self.codes[cat]) for cat in self.cat_cols]\n emb_dims = [max(int(n**0.25), 16) for n in num_classes]\n return list(zip(num_classes, emb_dims))\n\n @staticmethod\n def default_pipeline() -> DataPipeline():\n # TabularDataPipeline depends on the data\n return DataPipeline()\n", "path": "flash/tabular/classification/data/data.py"}]}
| 3,438 | 442 |
gh_patches_debug_13699
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-176
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Editing an existing user token shows "create" buttons instead of "update"
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version:
* Nautobot version:
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Navigate to user "Profile"
2. Navigate to "API Tokens"
3. Click "Add token"
4. Click "Create"
5. From the token list view, click "Edit" on the token you just created
<!-- What did you expect to happen? -->
### Expected Behavior
There should be an "Update" button.
<!-- What happened instead? -->
### Observed Behavior
There are "Create" and "Create and Add Another" buttons.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/users/views.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.auth import (
6 login as auth_login,
7 logout as auth_logout,
8 update_session_auth_hash,
9 )
10 from django.contrib.auth.mixins import LoginRequiredMixin
11 from django.contrib.auth.models import update_last_login
12 from django.contrib.auth.signals import user_logged_in
13 from django.http import HttpResponseForbidden, HttpResponseRedirect
14 from django.shortcuts import get_object_or_404, redirect, render
15 from django.urls import reverse
16 from django.utils.decorators import method_decorator
17 from django.utils.http import is_safe_url
18 from django.views.decorators.debug import sensitive_post_parameters
19 from django.views.generic import View
20
21 from nautobot.utilities.forms import ConfirmationForm
22 from .forms import LoginForm, PasswordChangeForm, TokenForm
23 from .models import Token
24
25
26 #
27 # Login/logout
28 #
29
30
31 class LoginView(View):
32 """
33 Perform user authentication via the web UI.
34 """
35
36 template_name = "login.html"
37
38 @method_decorator(sensitive_post_parameters("password"))
39 def dispatch(self, *args, **kwargs):
40 return super().dispatch(*args, **kwargs)
41
42 def get(self, request):
43 form = LoginForm(request)
44
45 if request.user.is_authenticated:
46 logger = logging.getLogger("nautobot.auth.login")
47 return self.redirect_to_next(request, logger)
48
49 return render(
50 request,
51 self.template_name,
52 {
53 "form": form,
54 },
55 )
56
57 def post(self, request):
58 logger = logging.getLogger("nautobot.auth.login")
59 form = LoginForm(request, data=request.POST)
60
61 if form.is_valid():
62 logger.debug("Login form validation was successful")
63
64 # If maintenance mode is enabled, assume the database is read-only, and disable updating the user's
65 # last_login time upon authentication.
66 if settings.MAINTENANCE_MODE:
67 logger.warning("Maintenance mode enabled: disabling update of most recent login time")
68 user_logged_in.disconnect(update_last_login, dispatch_uid="update_last_login")
69
70 # Authenticate user
71 auth_login(request, form.get_user())
72 logger.info(f"User {request.user} successfully authenticated")
73 messages.info(request, "Logged in as {}.".format(request.user))
74
75 return self.redirect_to_next(request, logger)
76
77 else:
78 logger.debug("Login form validation failed")
79
80 return render(
81 request,
82 self.template_name,
83 {
84 "form": form,
85 },
86 )
87
88 def redirect_to_next(self, request, logger):
89 if request.method == "POST":
90 redirect_to = request.POST.get("next", reverse("home"))
91 else:
92 redirect_to = request.GET.get("next", reverse("home"))
93
94 if redirect_to and not is_safe_url(url=redirect_to, allowed_hosts=request.get_host()):
95 logger.warning(f"Ignoring unsafe 'next' URL passed to login form: {redirect_to}")
96 redirect_to = reverse("home")
97
98 logger.debug(f"Redirecting user to {redirect_to}")
99 return HttpResponseRedirect(redirect_to)
100
101
102 class LogoutView(View):
103 """
104 Deauthenticate a web user.
105 """
106
107 def get(self, request):
108 logger = logging.getLogger("nautobot.auth.logout")
109
110 # Log out the user
111 username = request.user
112 auth_logout(request)
113 logger.info(f"User {username} has logged out")
114 messages.info(request, "You have logged out.")
115
116 # Delete session key cookie (if set) upon logout
117 response = HttpResponseRedirect(reverse("home"))
118 response.delete_cookie("session_key")
119
120 return response
121
122
123 #
124 # User profiles
125 #
126
127
128 class ProfileView(LoginRequiredMixin, View):
129 template_name = "users/profile.html"
130
131 def get(self, request):
132
133 return render(
134 request,
135 self.template_name,
136 {
137 "active_tab": "profile",
138 },
139 )
140
141
142 class UserConfigView(LoginRequiredMixin, View):
143 template_name = "users/preferences.html"
144
145 def get(self, request):
146
147 return render(
148 request,
149 self.template_name,
150 {
151 "preferences": request.user.all_config(),
152 "active_tab": "preferences",
153 },
154 )
155
156 def post(self, request):
157 user = request.user
158 data = user.all_config()
159
160 # Delete selected preferences
161 for key in request.POST.getlist("pk"):
162 if key in data:
163 user.clear_config(key)
164 user.save()
165 messages.success(request, "Your preferences have been updated.")
166
167 return redirect("user:preferences")
168
169
170 class ChangePasswordView(LoginRequiredMixin, View):
171 template_name = "users/change_password.html"
172
173 def get(self, request):
174 # LDAP users cannot change their password here
175 if getattr(request.user, "ldap_username", None):
176 messages.warning(
177 request,
178 "LDAP-authenticated user credentials cannot be changed within Nautobot.",
179 )
180 return redirect("user:profile")
181
182 form = PasswordChangeForm(user=request.user)
183
184 return render(
185 request,
186 self.template_name,
187 {
188 "form": form,
189 "active_tab": "change_password",
190 },
191 )
192
193 def post(self, request):
194 form = PasswordChangeForm(user=request.user, data=request.POST)
195 if form.is_valid():
196 form.save()
197 update_session_auth_hash(request, form.user)
198 messages.success(request, "Your password has been changed successfully.")
199 return redirect("user:profile")
200
201 return render(
202 request,
203 self.template_name,
204 {
205 "form": form,
206 "active_tab": "change_password",
207 },
208 )
209
210
211 #
212 # API tokens
213 #
214
215
216 class TokenListView(LoginRequiredMixin, View):
217 def get(self, request):
218
219 tokens = Token.objects.filter(user=request.user)
220
221 return render(
222 request,
223 "users/api_tokens.html",
224 {
225 "tokens": tokens,
226 "active_tab": "api_tokens",
227 },
228 )
229
230
231 class TokenEditView(LoginRequiredMixin, View):
232 def get(self, request, pk=None):
233
234 if pk is not None:
235 if not request.user.has_perm("users.change_token"):
236 return HttpResponseForbidden()
237 token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
238 else:
239 if not request.user.has_perm("users.add_token"):
240 return HttpResponseForbidden()
241 token = Token(user=request.user)
242
243 form = TokenForm(instance=token)
244
245 return render(
246 request,
247 "generic/object_edit.html",
248 {
249 "obj": token,
250 "obj_type": token._meta.verbose_name,
251 "form": form,
252 "return_url": reverse("user:token_list"),
253 },
254 )
255
256 def post(self, request, pk=None):
257
258 if pk is not None:
259 token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
260 form = TokenForm(request.POST, instance=token)
261 else:
262 token = Token()
263 form = TokenForm(request.POST)
264
265 if form.is_valid():
266 token = form.save(commit=False)
267 token.user = request.user
268 token.save()
269
270 msg = "Modified token {}".format(token) if pk else "Created token {}".format(token)
271 messages.success(request, msg)
272
273 if "_addanother" in request.POST:
274 return redirect(request.path)
275 else:
276 return redirect("user:token_list")
277
278 return render(
279 request,
280 "generic/object_edit.html",
281 {
282 "obj": token,
283 "obj_type": token._meta.verbose_name,
284 "form": form,
285 "return_url": reverse("user:token_list"),
286 },
287 )
288
289
290 class TokenDeleteView(LoginRequiredMixin, View):
291 def get(self, request, pk):
292
293 token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
294 initial_data = {
295 "return_url": reverse("user:token_list"),
296 }
297 form = ConfirmationForm(initial=initial_data)
298
299 return render(
300 request,
301 "generic/object_delete.html",
302 {
303 "obj": token,
304 "obj_type": token._meta.verbose_name,
305 "form": form,
306 "return_url": reverse("user:token_list"),
307 },
308 )
309
310 def post(self, request, pk):
311
312 token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)
313 form = ConfirmationForm(request.POST)
314 if form.is_valid():
315 token.delete()
316 messages.success(request, "Token deleted")
317 return redirect("user:token_list")
318
319 return render(
320 request,
321 "generic/object_delete.html",
322 {
323 "obj": token,
324 "obj_type": token._meta.verbose_name,
325 "form": form,
326 "return_url": reverse("user:token_list"),
327 },
328 )
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/users/views.py b/nautobot/users/views.py
--- a/nautobot/users/views.py
+++ b/nautobot/users/views.py
@@ -250,6 +250,7 @@
"obj_type": token._meta.verbose_name,
"form": form,
"return_url": reverse("user:token_list"),
+ "editing": token.present_in_database,
},
)
@@ -283,6 +284,7 @@
"obj_type": token._meta.verbose_name,
"form": form,
"return_url": reverse("user:token_list"),
+ "editing": token.present_in_database,
},
)
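
For context: `token.present_in_database` separates the two branches of `TokenEditView` — it is False for the unsaved `Token(user=request.user)` built when `pk` is None and True for a token fetched with `get_object_or_404`. A brief sketch of the flag the patch adds (the markup of `generic/object_edit.html`, which consumes it, is not shown above, so how it switches the button labels is an assumption):

```python
def edit_context(token):
    """Extra context added by the patch; mirrors both render() calls above."""
    return {"editing": token.present_in_database}

# pk is None  -> Token(user=request.user) is unsaved -> {"editing": False}
#                -> the template keeps the "Create" / "Create and Add Another" buttons.
# pk is given -> get_object_or_404(...) returns a saved token -> {"editing": True}
#                -> the template can show "Update" instead.
```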
|
{"golden_diff": "diff --git a/nautobot/users/views.py b/nautobot/users/views.py\n--- a/nautobot/users/views.py\n+++ b/nautobot/users/views.py\n@@ -250,6 +250,7 @@\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n+ \"editing\": token.present_in_database,\n },\n )\n \n@@ -283,6 +284,7 @@\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n+ \"editing\": token.present_in_database,\n },\n )\n", "issue": "Editing an existing user token shows \"create\" buttons instead of \"update\"\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version:\r\n* Nautobot version:\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Navigate to user \"Profile\"\r\n2. Navigate to \"API Tokens\"\r\n3. Click \"Add token\"\r\n4. Click \"Create\"\r\n5. From the token list view, click \"Edit\" on the token you just created\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nThere should be an \"Update\" button.\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\n\r\nThere are \"Create\" and \"Create and Add Another\" buttons. 
\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import (\n login as auth_login,\n logout as auth_logout,\n update_session_auth_hash,\n)\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import update_last_login\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.http import HttpResponseForbidden, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import is_safe_url\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.generic import View\n\nfrom nautobot.utilities.forms import ConfirmationForm\nfrom .forms import LoginForm, PasswordChangeForm, TokenForm\nfrom .models import Token\n\n\n#\n# Login/logout\n#\n\n\nclass LoginView(View):\n \"\"\"\n Perform user authentication via the web UI.\n \"\"\"\n\n template_name = \"login.html\"\n\n @method_decorator(sensitive_post_parameters(\"password\"))\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request):\n form = LoginForm(request)\n\n if request.user.is_authenticated:\n logger = logging.getLogger(\"nautobot.auth.login\")\n return self.redirect_to_next(request, logger)\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n },\n )\n\n def post(self, request):\n logger = logging.getLogger(\"nautobot.auth.login\")\n form = LoginForm(request, data=request.POST)\n\n if form.is_valid():\n logger.debug(\"Login form validation was successful\")\n\n # If maintenance mode is enabled, assume the database is read-only, and disable updating the user's\n # last_login time upon authentication.\n if settings.MAINTENANCE_MODE:\n logger.warning(\"Maintenance mode enabled: disabling update of most recent login time\")\n user_logged_in.disconnect(update_last_login, dispatch_uid=\"update_last_login\")\n\n # Authenticate user\n auth_login(request, form.get_user())\n logger.info(f\"User {request.user} successfully authenticated\")\n messages.info(request, \"Logged in as {}.\".format(request.user))\n\n return self.redirect_to_next(request, logger)\n\n else:\n logger.debug(\"Login form validation failed\")\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n },\n )\n\n def redirect_to_next(self, request, logger):\n if request.method == \"POST\":\n redirect_to = request.POST.get(\"next\", reverse(\"home\"))\n else:\n redirect_to = request.GET.get(\"next\", reverse(\"home\"))\n\n if redirect_to and not is_safe_url(url=redirect_to, allowed_hosts=request.get_host()):\n logger.warning(f\"Ignoring unsafe 'next' URL passed to login form: {redirect_to}\")\n redirect_to = reverse(\"home\")\n\n logger.debug(f\"Redirecting user to {redirect_to}\")\n return HttpResponseRedirect(redirect_to)\n\n\nclass LogoutView(View):\n \"\"\"\n Deauthenticate a web user.\n \"\"\"\n\n def get(self, request):\n logger = logging.getLogger(\"nautobot.auth.logout\")\n\n # Log out the user\n username = request.user\n auth_logout(request)\n logger.info(f\"User {username} has logged out\")\n messages.info(request, \"You have logged out.\")\n\n # Delete session key cookie (if set) upon logout\n response = HttpResponseRedirect(reverse(\"home\"))\n response.delete_cookie(\"session_key\")\n\n return response\n\n\n#\n# User profiles\n#\n\n\nclass 
ProfileView(LoginRequiredMixin, View):\n template_name = \"users/profile.html\"\n\n def get(self, request):\n\n return render(\n request,\n self.template_name,\n {\n \"active_tab\": \"profile\",\n },\n )\n\n\nclass UserConfigView(LoginRequiredMixin, View):\n template_name = \"users/preferences.html\"\n\n def get(self, request):\n\n return render(\n request,\n self.template_name,\n {\n \"preferences\": request.user.all_config(),\n \"active_tab\": \"preferences\",\n },\n )\n\n def post(self, request):\n user = request.user\n data = user.all_config()\n\n # Delete selected preferences\n for key in request.POST.getlist(\"pk\"):\n if key in data:\n user.clear_config(key)\n user.save()\n messages.success(request, \"Your preferences have been updated.\")\n\n return redirect(\"user:preferences\")\n\n\nclass ChangePasswordView(LoginRequiredMixin, View):\n template_name = \"users/change_password.html\"\n\n def get(self, request):\n # LDAP users cannot change their password here\n if getattr(request.user, \"ldap_username\", None):\n messages.warning(\n request,\n \"LDAP-authenticated user credentials cannot be changed within Nautobot.\",\n )\n return redirect(\"user:profile\")\n\n form = PasswordChangeForm(user=request.user)\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n \"active_tab\": \"change_password\",\n },\n )\n\n def post(self, request):\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n messages.success(request, \"Your password has been changed successfully.\")\n return redirect(\"user:profile\")\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n \"active_tab\": \"change_password\",\n },\n )\n\n\n#\n# API tokens\n#\n\n\nclass TokenListView(LoginRequiredMixin, View):\n def get(self, request):\n\n tokens = Token.objects.filter(user=request.user)\n\n return render(\n request,\n \"users/api_tokens.html\",\n {\n \"tokens\": tokens,\n \"active_tab\": \"api_tokens\",\n },\n )\n\n\nclass TokenEditView(LoginRequiredMixin, View):\n def get(self, request, pk=None):\n\n if pk is not None:\n if not request.user.has_perm(\"users.change_token\"):\n return HttpResponseForbidden()\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n else:\n if not request.user.has_perm(\"users.add_token\"):\n return HttpResponseForbidden()\n token = Token(user=request.user)\n\n form = TokenForm(instance=token)\n\n return render(\n request,\n \"generic/object_edit.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n\n def post(self, request, pk=None):\n\n if pk is not None:\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n form = TokenForm(request.POST, instance=token)\n else:\n token = Token()\n form = TokenForm(request.POST)\n\n if form.is_valid():\n token = form.save(commit=False)\n token.user = request.user\n token.save()\n\n msg = \"Modified token {}\".format(token) if pk else \"Created token {}\".format(token)\n messages.success(request, msg)\n\n if \"_addanother\" in request.POST:\n return redirect(request.path)\n else:\n return redirect(\"user:token_list\")\n\n return render(\n request,\n \"generic/object_edit.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n\n\nclass TokenDeleteView(LoginRequiredMixin, View):\n def get(self, 
request, pk):\n\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n initial_data = {\n \"return_url\": reverse(\"user:token_list\"),\n }\n form = ConfirmationForm(initial=initial_data)\n\n return render(\n request,\n \"generic/object_delete.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n\n def post(self, request, pk):\n\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n form = ConfirmationForm(request.POST)\n if form.is_valid():\n token.delete()\n messages.success(request, \"Token deleted\")\n return redirect(\"user:token_list\")\n\n return render(\n request,\n \"generic/object_delete.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n", "path": "nautobot/users/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import (\n login as auth_login,\n logout as auth_logout,\n update_session_auth_hash,\n)\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import update_last_login\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.http import HttpResponseForbidden, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import is_safe_url\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.generic import View\n\nfrom nautobot.utilities.forms import ConfirmationForm\nfrom .forms import LoginForm, PasswordChangeForm, TokenForm\nfrom .models import Token\n\n\n#\n# Login/logout\n#\n\n\nclass LoginView(View):\n \"\"\"\n Perform user authentication via the web UI.\n \"\"\"\n\n template_name = \"login.html\"\n\n @method_decorator(sensitive_post_parameters(\"password\"))\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get(self, request):\n form = LoginForm(request)\n\n if request.user.is_authenticated:\n logger = logging.getLogger(\"nautobot.auth.login\")\n return self.redirect_to_next(request, logger)\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n },\n )\n\n def post(self, request):\n logger = logging.getLogger(\"nautobot.auth.login\")\n form = LoginForm(request, data=request.POST)\n\n if form.is_valid():\n logger.debug(\"Login form validation was successful\")\n\n # If maintenance mode is enabled, assume the database is read-only, and disable updating the user's\n # last_login time upon authentication.\n if settings.MAINTENANCE_MODE:\n logger.warning(\"Maintenance mode enabled: disabling update of most recent login time\")\n user_logged_in.disconnect(update_last_login, dispatch_uid=\"update_last_login\")\n\n # Authenticate user\n auth_login(request, form.get_user())\n logger.info(f\"User {request.user} successfully authenticated\")\n messages.info(request, \"Logged in as {}.\".format(request.user))\n\n return self.redirect_to_next(request, logger)\n\n else:\n logger.debug(\"Login form validation failed\")\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n },\n )\n\n def redirect_to_next(self, request, logger):\n if request.method == \"POST\":\n redirect_to = request.POST.get(\"next\", reverse(\"home\"))\n else:\n redirect_to = 
request.GET.get(\"next\", reverse(\"home\"))\n\n if redirect_to and not is_safe_url(url=redirect_to, allowed_hosts=request.get_host()):\n logger.warning(f\"Ignoring unsafe 'next' URL passed to login form: {redirect_to}\")\n redirect_to = reverse(\"home\")\n\n logger.debug(f\"Redirecting user to {redirect_to}\")\n return HttpResponseRedirect(redirect_to)\n\n\nclass LogoutView(View):\n \"\"\"\n Deauthenticate a web user.\n \"\"\"\n\n def get(self, request):\n logger = logging.getLogger(\"nautobot.auth.logout\")\n\n # Log out the user\n username = request.user\n auth_logout(request)\n logger.info(f\"User {username} has logged out\")\n messages.info(request, \"You have logged out.\")\n\n # Delete session key cookie (if set) upon logout\n response = HttpResponseRedirect(reverse(\"home\"))\n response.delete_cookie(\"session_key\")\n\n return response\n\n\n#\n# User profiles\n#\n\n\nclass ProfileView(LoginRequiredMixin, View):\n template_name = \"users/profile.html\"\n\n def get(self, request):\n\n return render(\n request,\n self.template_name,\n {\n \"active_tab\": \"profile\",\n },\n )\n\n\nclass UserConfigView(LoginRequiredMixin, View):\n template_name = \"users/preferences.html\"\n\n def get(self, request):\n\n return render(\n request,\n self.template_name,\n {\n \"preferences\": request.user.all_config(),\n \"active_tab\": \"preferences\",\n },\n )\n\n def post(self, request):\n user = request.user\n data = user.all_config()\n\n # Delete selected preferences\n for key in request.POST.getlist(\"pk\"):\n if key in data:\n user.clear_config(key)\n user.save()\n messages.success(request, \"Your preferences have been updated.\")\n\n return redirect(\"user:preferences\")\n\n\nclass ChangePasswordView(LoginRequiredMixin, View):\n template_name = \"users/change_password.html\"\n\n def get(self, request):\n # LDAP users cannot change their password here\n if getattr(request.user, \"ldap_username\", None):\n messages.warning(\n request,\n \"LDAP-authenticated user credentials cannot be changed within Nautobot.\",\n )\n return redirect(\"user:profile\")\n\n form = PasswordChangeForm(user=request.user)\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n \"active_tab\": \"change_password\",\n },\n )\n\n def post(self, request):\n form = PasswordChangeForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n messages.success(request, \"Your password has been changed successfully.\")\n return redirect(\"user:profile\")\n\n return render(\n request,\n self.template_name,\n {\n \"form\": form,\n \"active_tab\": \"change_password\",\n },\n )\n\n\n#\n# API tokens\n#\n\n\nclass TokenListView(LoginRequiredMixin, View):\n def get(self, request):\n\n tokens = Token.objects.filter(user=request.user)\n\n return render(\n request,\n \"users/api_tokens.html\",\n {\n \"tokens\": tokens,\n \"active_tab\": \"api_tokens\",\n },\n )\n\n\nclass TokenEditView(LoginRequiredMixin, View):\n def get(self, request, pk=None):\n\n if pk is not None:\n if not request.user.has_perm(\"users.change_token\"):\n return HttpResponseForbidden()\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n else:\n if not request.user.has_perm(\"users.add_token\"):\n return HttpResponseForbidden()\n token = Token(user=request.user)\n\n form = TokenForm(instance=token)\n\n return render(\n request,\n \"generic/object_edit.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": 
reverse(\"user:token_list\"),\n \"editing\": token.present_in_database,\n },\n )\n\n def post(self, request, pk=None):\n\n if pk is not None:\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n form = TokenForm(request.POST, instance=token)\n else:\n token = Token()\n form = TokenForm(request.POST)\n\n if form.is_valid():\n token = form.save(commit=False)\n token.user = request.user\n token.save()\n\n msg = \"Modified token {}\".format(token) if pk else \"Created token {}\".format(token)\n messages.success(request, msg)\n\n if \"_addanother\" in request.POST:\n return redirect(request.path)\n else:\n return redirect(\"user:token_list\")\n\n return render(\n request,\n \"generic/object_edit.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n \"editing\": token.present_in_database,\n },\n )\n\n\nclass TokenDeleteView(LoginRequiredMixin, View):\n def get(self, request, pk):\n\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n initial_data = {\n \"return_url\": reverse(\"user:token_list\"),\n }\n form = ConfirmationForm(initial=initial_data)\n\n return render(\n request,\n \"generic/object_delete.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n\n def post(self, request, pk):\n\n token = get_object_or_404(Token.objects.filter(user=request.user), pk=pk)\n form = ConfirmationForm(request.POST)\n if form.is_valid():\n token.delete()\n messages.success(request, \"Token deleted\")\n return redirect(\"user:token_list\")\n\n return render(\n request,\n \"generic/object_delete.html\",\n {\n \"obj\": token,\n \"obj_type\": token._meta.verbose_name,\n \"form\": form,\n \"return_url\": reverse(\"user:token_list\"),\n },\n )\n", "path": "nautobot/users/views.py"}]}
| 3,422 | 151 |
gh_patches_debug_24316
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2757
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't update my take
I tried to increase my take from one of the teams that I belong to, but was unable to. I got the following error.

I was increasing to less than double the amount, and my history shows that the new amount is less than double the amount I was attempting to take.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/models/_mixin_team.py`
Content:
```
1 """Teams on Gratipay are plural participants with members.
2 """
3 from collections import OrderedDict
4 from decimal import Decimal
5
6 from aspen.utils import typecheck
7
8
9 class MemberLimitReached(Exception): pass
10
11 class StubParticipantAdded(Exception): pass
12
13 class MixinTeam(object):
14 """This class provides methods for working with a Participant as a Team.
15
16 :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team
17
18 """
19
20 # XXX These were all written with the ORM and need to be converted.
21
22 def __init__(self, participant):
23 self.participant = participant
24
25 def show_as_team(self, user):
26 """Return a boolean, whether to show this participant as a team.
27 """
28 if not self.IS_PLURAL:
29 return False
30 if user.ADMIN:
31 return True
32 if not self.get_current_takes():
33 if self == user.participant:
34 return True
35 return False
36 return True
37
38 def add_member(self, member):
39 """Add a member to this team.
40 """
41 assert self.IS_PLURAL
42 if len(self.get_current_takes()) == 149:
43 raise MemberLimitReached
44 if not member.is_claimed:
45 raise StubParticipantAdded
46 self.__set_take_for(member, Decimal('0.01'), self)
47
48 def remove_member(self, member):
49 """Remove a member from this team.
50 """
51 assert self.IS_PLURAL
52 self.__set_take_for(member, Decimal('0.00'), self)
53
54 def remove_all_members(self, cursor=None):
55 (cursor or self.db).run("""
56 INSERT INTO takes (ctime, member, team, amount, recorder) (
57 SELECT ctime, member, %(username)s, 0.00, %(username)s
58 FROM current_takes
59 WHERE team=%(username)s
60 AND amount > 0
61 );
62 """, dict(username=self.username))
63
64 def member_of(self, team):
65 """Given a Participant object, return a boolean.
66 """
67 assert team.IS_PLURAL
68 for take in team.get_current_takes():
69 if take['member'] == self.username:
70 return True
71 return False
72
73 def get_take_last_week_for(self, member):
74 """What did the user actually take most recently? Used in throttling.
75 """
76 assert self.IS_PLURAL
77 membername = member.username if hasattr(member, 'username') \
78 else member['username']
79 return self.db.one("""
80
81 SELECT amount
82 FROM transfers
83 WHERE tipper=%s AND tippee=%s AND context='take'
84 AND timestamp > (
85 SELECT ts_start
86 FROM paydays
87 WHERE ts_end > ts_start
88 ORDER BY ts_start DESC LIMIT 1
89 )
90 ORDER BY timestamp ASC LIMIT 1
91
92 """, (self.username, membername), default=Decimal('0.00'))
93
94 def get_take_for(self, member):
95 """Return a Decimal representation of the take for this member, or 0.
96 """
97 assert self.IS_PLURAL
98 return self.db.one( "SELECT amount FROM current_takes "
99 "WHERE member=%s AND team=%s"
100 , (member.username, self.username)
101 , default=Decimal('0.00')
102 )
103
104 def compute_max_this_week(self, last_week):
105 """2x last week's take, but at least a dollar.
106 """
107 return max(last_week * Decimal('2'), Decimal('1.00'))
108
109 def set_take_for(self, member, take, recorder):
110 """Sets member's take from the team pool.
111 """
112 assert self.IS_PLURAL
113
114 # lazy import to avoid circular import
115 from gratipay.security.user import User
116 from gratipay.models.participant import Participant
117
118 typecheck( member, Participant
119 , take, Decimal
120 , recorder, (Participant, User)
121 )
122
123 last_week = self.get_take_last_week_for(member)
124 max_this_week = self.compute_max_this_week(last_week)
125 if take > max_this_week:
126 take = max_this_week
127
128 self.__set_take_for(member, take, recorder)
129 return take
130
131 def __set_take_for(self, member, amount, recorder):
132 assert self.IS_PLURAL
133 # XXX Factored out for testing purposes only! :O Use .set_take_for.
134 with self.db.get_cursor() as cursor:
135 # Lock to avoid race conditions
136 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE")
137 # Compute the current takes
138 old_takes = self.compute_actual_takes(cursor)
139 # Insert the new take
140 cursor.run("""
141
142 INSERT INTO takes (ctime, member, team, amount, recorder)
143 VALUES ( COALESCE (( SELECT ctime
144 FROM takes
145 WHERE member=%(member)s
146 AND team=%(team)s
147 LIMIT 1
148 ), CURRENT_TIMESTAMP)
149 , %(member)s
150 , %(team)s
151 , %(amount)s
152 , %(recorder)s
153 )
154
155 """, dict(member=member.username, team=self.username, amount=amount,
156 recorder=recorder.username))
157 # Compute the new takes
158 new_takes = self.compute_actual_takes(cursor)
159 # Update receiving amounts in the participants table
160 self.update_taking(old_takes, new_takes, cursor, member)
161
162 def update_taking(self, old_takes, new_takes, cursor=None, member=None):
163 """Update `taking` amounts based on the difference between `old_takes`
164 and `new_takes`.
165 """
166 for username in set(old_takes.keys()).union(new_takes.keys()):
167 if username == self.username:
168 continue
169 old = old_takes.get(username, {}).get('actual_amount', Decimal(0))
170 new = new_takes.get(username, {}).get('actual_amount', Decimal(0))
171 diff = new - old
172 if diff != 0:
173 r = (self.db or cursor).one("""
174 UPDATE participants
175 SET taking = (taking + %(diff)s)
176 , receiving = (receiving + %(diff)s)
177 WHERE username=%(username)s
178 RETURNING taking, receiving
179 """, dict(username=username, diff=diff))
180 if member and username == member.username:
181 member.set_attributes(**r._asdict())
182
183 def get_current_takes(self, cursor=None):
184 """Return a list of member takes for a team.
185 """
186 assert self.IS_PLURAL
187 TAKES = """
188 SELECT member, amount, ctime, mtime
189 FROM current_takes
190 WHERE team=%(team)s
191 ORDER BY ctime DESC
192 """
193 records = (cursor or self.db).all(TAKES, dict(team=self.username))
194 return [r._asdict() for r in records]
195
196 def get_team_take(self, cursor=None):
197 """Return a single take for a team, the team itself's take.
198 """
199 assert self.IS_PLURAL
200 TAKE = "SELECT sum(amount) FROM current_takes WHERE team=%s"
201 total_take = (cursor or self.db).one(TAKE, (self.username,), default=0)
202 team_take = max(self.receiving - total_take, 0)
203 membership = { "ctime": None
204 , "mtime": None
205 , "member": self.username
206 , "amount": team_take
207 }
208 return membership
209
210 def compute_actual_takes(self, cursor=None):
211 """Get the takes, compute the actual amounts, and return an OrderedDict.
212 """
213 actual_takes = OrderedDict()
214 nominal_takes = self.get_current_takes(cursor=cursor)
215 nominal_takes.append(self.get_team_take(cursor=cursor))
216 budget = balance = self.balance + self.receiving
217 for take in nominal_takes:
218 nominal_amount = take['nominal_amount'] = take.pop('amount')
219 actual_amount = take['actual_amount'] = min(nominal_amount, balance)
220 if take['member'] != self.username:
221 balance -= actual_amount
222 take['balance'] = balance
223 take['percentage'] = (actual_amount / budget) if budget > 0 else 0
224 actual_takes[take['member']] = take
225 return actual_takes
226
227 def get_members(self, current_participant):
228 """Return a list of member dicts.
229 """
230 assert self.IS_PLURAL
231 takes = self.compute_actual_takes()
232 members = []
233 for take in takes.values():
234 member = {}
235 member['username'] = take['member']
236 member['take'] = take['nominal_amount']
237 member['balance'] = take['balance']
238 member['percentage'] = take['percentage']
239
240 member['removal_allowed'] = current_participant == self
241 member['editing_allowed'] = False
242 member['is_current_user'] = False
243 if current_participant is not None:
244 if member['username'] == current_participant.username:
245 member['is_current_user'] = True
246 if take['ctime'] is not None:
247 # current user, but not the team itself
248 member['editing_allowed']= True
249
250 member['last_week'] = last_week = self.get_take_last_week_for(member)
251 member['max_this_week'] = self.compute_max_this_week(last_week)
252 members.append(member)
253 return members
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/models/_mixin_team.py b/gratipay/models/_mixin_team.py
--- a/gratipay/models/_mixin_team.py
+++ b/gratipay/models/_mixin_team.py
@@ -71,7 +71,7 @@
return False
def get_take_last_week_for(self, member):
- """What did the user actually take most recently? Used in throttling.
+ """Get the user's nominal take last week. Used in throttling.
"""
assert self.IS_PLURAL
membername = member.username if hasattr(member, 'username') \
@@ -79,15 +79,15 @@
return self.db.one("""
SELECT amount
- FROM transfers
- WHERE tipper=%s AND tippee=%s AND context='take'
- AND timestamp > (
+ FROM takes
+ WHERE team=%s AND member=%s
+ AND mtime < (
SELECT ts_start
FROM paydays
WHERE ts_end > ts_start
ORDER BY ts_start DESC LIMIT 1
)
- ORDER BY timestamp ASC LIMIT 1
+ ORDER BY mtime DESC LIMIT 1
""", (self.username, membername), default=Decimal('0.00'))
|
{"golden_diff": "diff --git a/gratipay/models/_mixin_team.py b/gratipay/models/_mixin_team.py\n--- a/gratipay/models/_mixin_team.py\n+++ b/gratipay/models/_mixin_team.py\n@@ -71,7 +71,7 @@\n return False\n \n def get_take_last_week_for(self, member):\n- \"\"\"What did the user actually take most recently? Used in throttling.\n+ \"\"\"Get the user's nominal take last week. Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n@@ -79,15 +79,15 @@\n return self.db.one(\"\"\"\n \n SELECT amount\n- FROM transfers\n- WHERE tipper=%s AND tippee=%s AND context='take'\n- AND timestamp > (\n+ FROM takes\n+ WHERE team=%s AND member=%s\n+ AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n- ORDER BY timestamp ASC LIMIT 1\n+ ORDER BY mtime DESC LIMIT 1\n \n \"\"\", (self.username, membername), default=Decimal('0.00'))\n", "issue": "Can't update my take\nI tried to increase my take from one of the teams that I belong but was unable to. I got the following error.\n\n\n\nI was increasing to less that double the amount and my history shows that the new amount is less than double the the amount I was attempting to take. \n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom collections import OrderedDict\nfrom decimal import Decimal\n\nfrom aspen.utils import typecheck\n\n\nclass MemberLimitReached(Exception): pass\n\nclass StubParticipantAdded(Exception): pass\n\nclass MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n\n :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team\n\n \"\"\"\n\n # XXX These were all written with the ORM and need to be converted.\n\n def __init__(self, participant):\n self.participant = participant\n\n def show_as_team(self, user):\n \"\"\"Return a boolean, whether to show this participant as a team.\n \"\"\"\n if not self.IS_PLURAL:\n return False\n if user.ADMIN:\n return True\n if not self.get_current_takes():\n if self == user.participant:\n return True\n return False\n return True\n\n def add_member(self, member):\n \"\"\"Add a member to this team.\n \"\"\"\n assert self.IS_PLURAL\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if not member.is_claimed:\n raise StubParticipantAdded\n self.__set_take_for(member, Decimal('0.01'), self)\n\n def remove_member(self, member):\n \"\"\"Remove a member from this team.\n \"\"\"\n assert self.IS_PLURAL\n self.__set_take_for(member, Decimal('0.00'), self)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(username)s, 0.00, %(username)s\n FROM current_takes\n WHERE team=%(username)s\n AND amount > 0\n );\n \"\"\", dict(username=self.username))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.IS_PLURAL\n for take in team.get_current_takes():\n if take['member'] == self.username:\n return True\n return False\n\n def get_take_last_week_for(self, member):\n \"\"\"What did the user actually take most recently? 
Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n else member['username']\n return self.db.one(\"\"\"\n\n SELECT amount\n FROM transfers\n WHERE tipper=%s AND tippee=%s AND context='take'\n AND timestamp > (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY timestamp ASC LIMIT 1\n\n \"\"\", (self.username, membername), default=Decimal('0.00'))\n\n def get_take_for(self, member):\n \"\"\"Return a Decimal representation of the take for this member, or 0.\n \"\"\"\n assert self.IS_PLURAL\n return self.db.one( \"SELECT amount FROM current_takes \"\n \"WHERE member=%s AND team=%s\"\n , (member.username, self.username)\n , default=Decimal('0.00')\n )\n\n def compute_max_this_week(self, last_week):\n \"\"\"2x last week's take, but at least a dollar.\n \"\"\"\n return max(last_week * Decimal('2'), Decimal('1.00'))\n\n def set_take_for(self, member, take, recorder):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.IS_PLURAL\n\n # lazy import to avoid circular import\n from gratipay.security.user import User\n from gratipay.models.participant import Participant\n\n typecheck( member, Participant\n , take, Decimal\n , recorder, (Participant, User)\n )\n\n last_week = self.get_take_last_week_for(member)\n max_this_week = self.compute_max_this_week(last_week)\n if take > max_this_week:\n take = max_this_week\n\n self.__set_take_for(member, take, recorder)\n return take\n\n def __set_take_for(self, member, amount, recorder):\n assert self.IS_PLURAL\n # XXX Factored out for testing purposes only! :O Use .set_take_for.\n with self.db.get_cursor() as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.username, team=self.username, amount=amount,\n recorder=recorder.username))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for username in set(old_takes.keys()).union(new_takes.keys()):\n if username == self.username:\n continue\n old = old_takes.get(username, {}).get('actual_amount', Decimal(0))\n new = new_takes.get(username, {}).get('actual_amount', Decimal(0))\n diff = new - old\n if diff != 0:\n r = (self.db or cursor).one(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE username=%(username)s\n RETURNING taking, receiving\n \"\"\", dict(username=username, diff=diff))\n if member and username == member.username:\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.IS_PLURAL\n TAKES = \"\"\"\n SELECT member, amount, ctime, mtime\n FROM current_takes\n WHERE team=%(team)s\n ORDER BY ctime DESC\n \"\"\"\n records = (cursor or 
self.db).all(TAKES, dict(team=self.username))\n return [r._asdict() for r in records]\n\n def get_team_take(self, cursor=None):\n \"\"\"Return a single take for a team, the team itself's take.\n \"\"\"\n assert self.IS_PLURAL\n TAKE = \"SELECT sum(amount) FROM current_takes WHERE team=%s\"\n total_take = (cursor or self.db).one(TAKE, (self.username,), default=0)\n team_take = max(self.receiving - total_take, 0)\n membership = { \"ctime\": None\n , \"mtime\": None\n , \"member\": self.username\n , \"amount\": team_take\n }\n return membership\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n nominal_takes.append(self.get_team_take(cursor=cursor))\n budget = balance = self.balance + self.receiving\n for take in nominal_takes:\n nominal_amount = take['nominal_amount'] = take.pop('amount')\n actual_amount = take['actual_amount'] = min(nominal_amount, balance)\n if take['member'] != self.username:\n balance -= actual_amount\n take['balance'] = balance\n take['percentage'] = (actual_amount / budget) if budget > 0 else 0\n actual_takes[take['member']] = take\n return actual_takes\n\n def get_members(self, current_participant):\n \"\"\"Return a list of member dicts.\n \"\"\"\n assert self.IS_PLURAL\n takes = self.compute_actual_takes()\n members = []\n for take in takes.values():\n member = {}\n member['username'] = take['member']\n member['take'] = take['nominal_amount']\n member['balance'] = take['balance']\n member['percentage'] = take['percentage']\n\n member['removal_allowed'] = current_participant == self\n member['editing_allowed'] = False\n member['is_current_user'] = False\n if current_participant is not None:\n if member['username'] == current_participant.username:\n member['is_current_user'] = True\n if take['ctime'] is not None:\n # current user, but not the team itself\n member['editing_allowed']= True\n\n member['last_week'] = last_week = self.get_take_last_week_for(member)\n member['max_this_week'] = self.compute_max_this_week(last_week)\n members.append(member)\n return members\n", "path": "gratipay/models/_mixin_team.py"}], "after_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom collections import OrderedDict\nfrom decimal import Decimal\n\nfrom aspen.utils import typecheck\n\n\nclass MemberLimitReached(Exception): pass\n\nclass StubParticipantAdded(Exception): pass\n\nclass MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n\n :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team\n\n \"\"\"\n\n # XXX These were all written with the ORM and need to be converted.\n\n def __init__(self, participant):\n self.participant = participant\n\n def show_as_team(self, user):\n \"\"\"Return a boolean, whether to show this participant as a team.\n \"\"\"\n if not self.IS_PLURAL:\n return False\n if user.ADMIN:\n return True\n if not self.get_current_takes():\n if self == user.participant:\n return True\n return False\n return True\n\n def add_member(self, member):\n \"\"\"Add a member to this team.\n \"\"\"\n assert self.IS_PLURAL\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if not member.is_claimed:\n raise StubParticipantAdded\n self.__set_take_for(member, Decimal('0.01'), self)\n\n def remove_member(self, member):\n \"\"\"Remove a member 
from this team.\n \"\"\"\n assert self.IS_PLURAL\n self.__set_take_for(member, Decimal('0.00'), self)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(username)s, 0.00, %(username)s\n FROM current_takes\n WHERE team=%(username)s\n AND amount > 0\n );\n \"\"\", dict(username=self.username))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.IS_PLURAL\n for take in team.get_current_takes():\n if take['member'] == self.username:\n return True\n return False\n\n def get_take_last_week_for(self, member):\n \"\"\"Get the user's nominal take last week. Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n else member['username']\n return self.db.one(\"\"\"\n\n SELECT amount\n FROM takes\n WHERE team=%s AND member=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY mtime DESC LIMIT 1\n\n \"\"\", (self.username, membername), default=Decimal('0.00'))\n\n def get_take_for(self, member):\n \"\"\"Return a Decimal representation of the take for this member, or 0.\n \"\"\"\n assert self.IS_PLURAL\n return self.db.one( \"SELECT amount FROM current_takes \"\n \"WHERE member=%s AND team=%s\"\n , (member.username, self.username)\n , default=Decimal('0.00')\n )\n\n def compute_max_this_week(self, last_week):\n \"\"\"2x last week's take, but at least a dollar.\n \"\"\"\n return max(last_week * Decimal('2'), Decimal('1.00'))\n\n def set_take_for(self, member, take, recorder):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.IS_PLURAL\n\n # lazy import to avoid circular import\n from gratipay.security.user import User\n from gratipay.models.participant import Participant\n\n typecheck( member, Participant\n , take, Decimal\n , recorder, (Participant, User)\n )\n\n last_week = self.get_take_last_week_for(member)\n max_this_week = self.compute_max_this_week(last_week)\n if take > max_this_week:\n take = max_this_week\n\n self.__set_take_for(member, take, recorder)\n return take\n\n def __set_take_for(self, member, amount, recorder):\n assert self.IS_PLURAL\n # XXX Factored out for testing purposes only! 
:O Use .set_take_for.\n with self.db.get_cursor() as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.username, team=self.username, amount=amount,\n recorder=recorder.username))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for username in set(old_takes.keys()).union(new_takes.keys()):\n if username == self.username:\n continue\n old = old_takes.get(username, {}).get('actual_amount', Decimal(0))\n new = new_takes.get(username, {}).get('actual_amount', Decimal(0))\n diff = new - old\n if diff != 0:\n r = (self.db or cursor).one(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE username=%(username)s\n RETURNING taking, receiving\n \"\"\", dict(username=username, diff=diff))\n if member and username == member.username:\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.IS_PLURAL\n TAKES = \"\"\"\n SELECT member, amount, ctime, mtime\n FROM current_takes\n WHERE team=%(team)s\n ORDER BY ctime DESC\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.username))\n return [r._asdict() for r in records]\n\n def get_team_take(self, cursor=None):\n \"\"\"Return a single take for a team, the team itself's take.\n \"\"\"\n assert self.IS_PLURAL\n TAKE = \"SELECT sum(amount) FROM current_takes WHERE team=%s\"\n total_take = (cursor or self.db).one(TAKE, (self.username,), default=0)\n team_take = max(self.receiving - total_take, 0)\n membership = { \"ctime\": None\n , \"mtime\": None\n , \"member\": self.username\n , \"amount\": team_take\n }\n return membership\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n nominal_takes.append(self.get_team_take(cursor=cursor))\n budget = balance = self.balance + self.receiving\n for take in nominal_takes:\n nominal_amount = take['nominal_amount'] = take.pop('amount')\n actual_amount = take['actual_amount'] = min(nominal_amount, balance)\n if take['member'] != self.username:\n balance -= actual_amount\n take['balance'] = balance\n take['percentage'] = (actual_amount / budget) if budget > 0 else 0\n actual_takes[take['member']] = take\n return actual_takes\n\n def get_members(self, current_participant):\n \"\"\"Return a list of member dicts.\n \"\"\"\n assert self.IS_PLURAL\n takes = self.compute_actual_takes()\n members = []\n for take in takes.values():\n member = {}\n member['username'] = take['member']\n member['take'] = take['nominal_amount']\n member['balance'] = take['balance']\n member['percentage'] = take['percentage']\n\n 
member['removal_allowed'] = current_participant == self\n member['editing_allowed'] = False\n member['is_current_user'] = False\n if current_participant is not None:\n if member['username'] == current_participant.username:\n member['is_current_user'] = True\n if take['ctime'] is not None:\n # current user, but not the team itself\n member['editing_allowed']= True\n\n member['last_week'] = last_week = self.get_take_last_week_for(member)\n member['max_this_week'] = self.compute_max_this_week(last_week)\n members.append(member)\n return members\n", "path": "gratipay/models/_mixin_team.py"}]}
| 3,062 | 278 |
| gh_patches_debug_18008 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3330 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Google logins broken with django-allauth 0.62+
# Recipe
- Open incognito window (just in case it matters)
- Navigate to grand-challenge.org
- Click Third party auth -> Google to login

- Acknowledge that you are sent to a "third party" by clicking continue on the next page.

# Result

> Unexpected Error
No login possible.
@amickan reported that no sentry errors are being recorded. I cannot login, presumably many other people cannot login either.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/profiles/providers/gmail/views.py`
Content:
```
1 from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
2 from allauth.socialaccount.providers.oauth2.views import (
3 OAuth2CallbackView,
4 OAuth2LoginView,
5 )
6
7 from grandchallenge.profiles.providers.gmail.provider import GmailProvider
8
9
10 class GmailOAuth2Adapter(GoogleOAuth2Adapter):
11 provider_id = GmailProvider.id
12
13
14 oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)
15 oauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)
16
```
Path: `app/grandchallenge/profiles/providers/gmail/provider.py`
Content:
```
1 from allauth.socialaccount.providers.google.provider import GoogleProvider
2
3
4 class GmailProvider(GoogleProvider):
5 id = "gmail"
6 name = "Google"
7
8 def extract_uid(self, data):
9 return str(data["email"])
10
11
12 provider_classes = [GmailProvider]
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/profiles/providers/gmail/provider.py b/app/grandchallenge/profiles/providers/gmail/provider.py
--- a/app/grandchallenge/profiles/providers/gmail/provider.py
+++ b/app/grandchallenge/profiles/providers/gmail/provider.py
@@ -1,9 +1,12 @@
from allauth.socialaccount.providers.google.provider import GoogleProvider
+from grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter
+
class GmailProvider(GoogleProvider):
id = "gmail"
name = "Google"
+ oauth2_adapter_class = GmailOAuth2Adapter
def extract_uid(self, data):
return str(data["email"])
diff --git a/app/grandchallenge/profiles/providers/gmail/views.py b/app/grandchallenge/profiles/providers/gmail/views.py
--- a/app/grandchallenge/profiles/providers/gmail/views.py
+++ b/app/grandchallenge/profiles/providers/gmail/views.py
@@ -4,11 +4,9 @@
OAuth2LoginView,
)
-from grandchallenge.profiles.providers.gmail.provider import GmailProvider
-
class GmailOAuth2Adapter(GoogleOAuth2Adapter):
- provider_id = GmailProvider.id
+ provider_id = "gmail"
oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)
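
In short, the patch stops the adapter from importing the provider (its `provider_id` becomes a plain string) and instead points the provider at its adapter via `oauth2_adapter_class`, which django-allauth 0.62+ evidently relies on to resolve the adapter. The sketch below condenses the two patched modules from the diff into one snippet for readability; it is not a drop-in replacement for either file, and all names are taken from the diff above.

```python
from allauth.socialaccount.providers.google.provider import GoogleProvider
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.oauth2.views import (
    OAuth2CallbackView,
    OAuth2LoginView,
)


class GmailOAuth2Adapter(GoogleOAuth2Adapter):
    provider_id = "gmail"  # plain string: no import of GmailProvider, no cycle


class GmailProvider(GoogleProvider):
    id = "gmail"
    name = "Google"
    oauth2_adapter_class = GmailOAuth2Adapter  # lets allauth find the adapter for this provider

    def extract_uid(self, data):
        return str(data["email"])


provider_classes = [GmailProvider]

oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)
```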
|
{"golden_diff": "diff --git a/app/grandchallenge/profiles/providers/gmail/provider.py b/app/grandchallenge/profiles/providers/gmail/provider.py\n--- a/app/grandchallenge/profiles/providers/gmail/provider.py\n+++ b/app/grandchallenge/profiles/providers/gmail/provider.py\n@@ -1,9 +1,12 @@\n from allauth.socialaccount.providers.google.provider import GoogleProvider\n \n+from grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter\n+\n \n class GmailProvider(GoogleProvider):\n id = \"gmail\"\n name = \"Google\"\n+ oauth2_adapter_class = GmailOAuth2Adapter\n \n def extract_uid(self, data):\n return str(data[\"email\"])\ndiff --git a/app/grandchallenge/profiles/providers/gmail/views.py b/app/grandchallenge/profiles/providers/gmail/views.py\n--- a/app/grandchallenge/profiles/providers/gmail/views.py\n+++ b/app/grandchallenge/profiles/providers/gmail/views.py\n@@ -4,11 +4,9 @@\n OAuth2LoginView,\n )\n \n-from grandchallenge.profiles.providers.gmail.provider import GmailProvider\n-\n \n class GmailOAuth2Adapter(GoogleOAuth2Adapter):\n- provider_id = GmailProvider.id\n+ provider_id = \"gmail\"\n \n \n oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)\n", "issue": "Google logins broken with django-allauth 0.62+\n# Recipe\r\n\r\n- Open incognito window (just in case it matters)\r\n- Navigate to grand-challenge.org\r\n- Click Third party auth -> Google to login\r\n \r\n\r\n\r\n- Acknowledge that you are sent to a \"third party\" by clicking continue on the next page.\r\n\r\n\r\n\r\n# Result\r\n\r\n\r\n\r\n> Unexpected Error\r\n\r\nNo login possible.\r\n\r\n@amickan reported that no sentry errors are being recorded. I cannot login, presumably many other people cannot login either.\r\n\n", "before_files": [{"content": "from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2CallbackView,\n OAuth2LoginView,\n)\n\nfrom grandchallenge.profiles.providers.gmail.provider import GmailProvider\n\n\nclass GmailOAuth2Adapter(GoogleOAuth2Adapter):\n provider_id = GmailProvider.id\n\n\noauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)\noauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)\n", "path": "app/grandchallenge/profiles/providers/gmail/views.py"}, {"content": "from allauth.socialaccount.providers.google.provider import GoogleProvider\n\n\nclass GmailProvider(GoogleProvider):\n id = \"gmail\"\n name = \"Google\"\n\n def extract_uid(self, data):\n return str(data[\"email\"])\n\n\nprovider_classes = [GmailProvider]\n", "path": "app/grandchallenge/profiles/providers/gmail/provider.py"}], "after_files": [{"content": "from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2CallbackView,\n OAuth2LoginView,\n)\n\n\nclass GmailOAuth2Adapter(GoogleOAuth2Adapter):\n provider_id = \"gmail\"\n\n\noauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)\noauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)\n", "path": "app/grandchallenge/profiles/providers/gmail/views.py"}, {"content": "from allauth.socialaccount.providers.google.provider import GoogleProvider\n\nfrom grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter\n\n\nclass GmailProvider(GoogleProvider):\n id = \"gmail\"\n name = \"Google\"\n oauth2_adapter_class = GmailOAuth2Adapter\n\n def extract_uid(self, data):\n return str(data[\"email\"])\n\n\nprovider_classes = [GmailProvider]\n", 
"path": "app/grandchallenge/profiles/providers/gmail/provider.py"}]}
| 772 | 276 |
| gh_patches_debug_25268 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2543 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'NoneType' object is not subscriptable
Running `checkov -d .` now emits an exception
```
2022-02-25 17:45:59,050 [MainThread ] [ERROR] Failed to run check: Ensure no NACL allow ingress from 0.0.0.0:0 to port 21 for configuration: {'cidr_block': ['0.0.0.0/0'], 'egress': [False], 'network_acl_id': ['aws_default_network_acl.public.id'], 'protocol': ['-1'], 'rule_action': ['allow'], 'rule_number': [100]} at file: /modules/network/regional/main.tf
Process ForkProcess-1:
Traceback (most recent call last):
File "/opt/homebrew/Cellar/[email protected]/3.10.2/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/opt/homebrew/Cellar/[email protected]/3.10.2/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/parallelizer/parallel_runner.py", line 29, in func_wrapper
result = original_func(item)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py", line 66, in <lambda>
lambda runner: runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py", line 119, in run
self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py", line 215, in check_tf_definition
self.run_all_blocks(definition, self.context, full_file_path, root_folder, report,
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py", line 225, in run_all_blocks
self.run_block(definition[block_type], definitions_context,
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py", line 297, in run_block
results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 121, in scan
result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 135, in run_check
result = check.run(
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 86, in run
raise e
File "/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 73, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 70, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py", line 41, in scan_resource_conf
if not self.check_rule(conf):
File "/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py", line 51, in check_rule
if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):
TypeError: 'NoneType' object is not subscriptable
```
- OS: MacOS 12.2
- Checkov Version 2.0.902
Relevant resource maybe as follows:
```
resource "aws_network_acl_rule" "public_ingress" {
network_acl_id = aws_default_network_acl.public.id
rule_number = 100
egress = false
protocol = "-1"
rule_action = "allow"
cidr_block = "0.0.0.0/0"
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 from checkov.common.util.type_forcers import force_list
4 from checkov.common.util.type_forcers import force_int
5
6
7 class AbsNACLUnrestrictedIngress(BaseResourceCheck):
8 def __init__(self, check_id, port):
9 name = "Ensure no NACL allow ingress from 0.0.0.0:0 to port %d" % port
10 supported_resources = ['aws_network_acl', 'aws_network_acl_rule']
11 categories = [CheckCategories.NETWORKING]
12 super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)
13 self.port = port
14
15 def scan_resource_conf(self, conf):
16 """
17
18 Return PASS if:
19 - The NACL doesnt allows restricted ingress access to the port
20 - The resource is an aws_network_acl of type 'ingress' that does not violate the check.
21
22 Return FAIL if:
23 - The the NACL allows unrestricted access to the port
24
25 Return UNKNOWN if:
26 - the resource is an NACL of type 'egress', OR
27
28 :param conf: aws_network_acl configuration
29 :return: <CheckResult>
30 """
31
32 if conf.get("ingress"):
33 ingress = conf.get("ingress")
34 for rule in ingress:
35 if not self.check_rule(rule):
36 return CheckResult.FAILED
37 return CheckResult.PASSED
38 # maybe its an network_acl_rule
39 if conf.get("network_acl_id"):
40 if not conf.get("egress")[0]:
41 if not self.check_rule(conf):
42 return CheckResult.FAILED
43 return CheckResult.PASSED
44
45 return CheckResult.UNKNOWN
46
47 def check_rule(self, rule):
48 if rule.get('cidr_block'):
49 if rule.get('cidr_block') == ["0.0.0.0/0"]:
50 if rule.get('action') == ["allow"] or rule.get('rule_action') == ["allow"]:
51 if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):
52 return False
53 if rule.get('ipv6_cidr_block'):
54 if rule.get('ipv6_cidr_block') == ["::/0"]:
55 if rule.get('action') == ["allow"] or rule.get('rule_action') == ["allow"]:
56 if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):
57 return False
58 return True
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py b/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py
--- a/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py
+++ b/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py
@@ -48,11 +48,17 @@
if rule.get('cidr_block'):
if rule.get('cidr_block') == ["0.0.0.0/0"]:
if rule.get('action') == ["allow"] or rule.get('rule_action') == ["allow"]:
+ protocol = rule.get('protocol')
+ if protocol and str(protocol[0]) == "-1":
+ return False
if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):
return False
if rule.get('ipv6_cidr_block'):
if rule.get('ipv6_cidr_block') == ["::/0"]:
if rule.get('action') == ["allow"] or rule.get('rule_action') == ["allow"]:
+ protocol = rule.get('protocol')
+ if protocol and str(protocol[0]) == "-1":
+ return False
if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):
return False
return True
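
The added guard matters because an all-protocol rule (`protocol = "-1"`, as in the resource quoted in the issue) carries no `from_port`/`to_port` attributes, so the old code dereferenced `None`. Here is a small stand-alone illustration of the failure and the patched behaviour; the `rule` dict is a hand-built stand-in for the parsed Terraform block, and port 21 stands in for the port the failing check scans.

```python
# Hand-built stand-in for the parsed aws_network_acl_rule from the issue:
# protocol "-1" means all traffic, so no from_port/to_port keys are present.
rule = {
    "cidr_block": ["0.0.0.0/0"],
    "rule_action": ["allow"],
    "protocol": ["-1"],
}

# Old logic: rule.get("from_port") is None, and None[0] raises
# "TypeError: 'NoneType' object is not subscriptable".
# Patched logic: an allow-all rule is flagged before any port lookup happens.
protocol = rule.get("protocol")
if protocol and str(protocol[0]) == "-1":
    port_open = True  # all ports, including 21, are reachable
else:
    port_open = int(rule["from_port"][0]) <= 21 <= int(rule["to_port"][0])

print(port_open)  # True -> the check reports the NACL as unrestricted
```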
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py b/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py\n--- a/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py\n+++ b/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py\n@@ -48,11 +48,17 @@\n if rule.get('cidr_block'):\n if rule.get('cidr_block') == [\"0.0.0.0/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n+ protocol = rule.get('protocol')\n+ if protocol and str(protocol[0]) == \"-1\":\n+ return False\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n if rule.get('ipv6_cidr_block'):\n if rule.get('ipv6_cidr_block') == [\"::/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n+ protocol = rule.get('protocol')\n+ if protocol and str(protocol[0]) == \"-1\":\n+ return False\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n return True\n", "issue": "'NoneType' object is not subscriptable\nRunning `checkov -d .` now emits an exception\r\n\r\n```\r\n2022-02-25 17:45:59,050 [MainThread ] [ERROR] Failed to run check: Ensure no NACL allow ingress from 0.0.0.0:0 to port 21 for configuration: {'cidr_block': ['0.0.0.0/0'], 'egress': [False], 'network_acl_id': ['aws_default_network_acl.public.id'], 'protocol': ['-1'], 'rule_action': ['allow'], 'rule_number': [100]} at file: /modules/network/regional/main.tf\r\nProcess ForkProcess-1:\r\nTraceback (most recent call last):\r\n File \"/opt/homebrew/Cellar/[email protected]/3.10.2/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/opt/homebrew/Cellar/[email protected]/3.10.2/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py\", line 108, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/common/parallelizer/parallel_runner.py\", line 29, in func_wrapper\r\n result = original_func(item)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py\", line 66, in <lambda>\r\n lambda runner: runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 119, in run\r\n self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 215, in check_tf_definition\r\n self.run_all_blocks(definition, self.context, full_file_path, root_folder, report,\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 225, in run_all_blocks\r\n self.run_block(definition[block_type], definitions_context,\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 297, in run_block\r\n results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 121, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 135, in run_check\r\n result = check.run(\r\n File 
\"/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 86, in run\r\n raise e\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 73, in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 70, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py\", line 41, in scan_resource_conf\r\n if not self.check_rule(conf):\r\n File \"/opt/homebrew/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py\", line 51, in check_rule\r\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\r\nTypeError: 'NoneType' object is not subscriptable\r\n```\r\n\r\n - OS: MacOS 12.2\r\n - Checkov Version 2.0.902\r\n\r\nRelevant resource maybe as follows:\r\n```\r\nresource \"aws_network_acl_rule\" \"public_ingress\" {\r\n network_acl_id = aws_default_network_acl.public.id\r\n rule_number = 100\r\n egress = false\r\n protocol = \"-1\"\r\n rule_action = \"allow\"\r\n cidr_block = \"0.0.0.0/0\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.type_forcers import force_int\n\n\nclass AbsNACLUnrestrictedIngress(BaseResourceCheck):\n def __init__(self, check_id, port):\n name = \"Ensure no NACL allow ingress from 0.0.0.0:0 to port %d\" % port\n supported_resources = ['aws_network_acl', 'aws_network_acl_rule']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def scan_resource_conf(self, conf):\n \"\"\"\n\n Return PASS if:\n - The NACL doesnt allows restricted ingress access to the port\n - The resource is an aws_network_acl of type 'ingress' that does not violate the check.\n\n Return FAIL if:\n - The the NACL allows unrestricted access to the port\n\n Return UNKNOWN if:\n - the resource is an NACL of type 'egress', OR\n\n :param conf: aws_network_acl configuration\n :return: <CheckResult>\n \"\"\"\n\n if conf.get(\"ingress\"):\n ingress = conf.get(\"ingress\")\n for rule in ingress:\n if not self.check_rule(rule):\n return CheckResult.FAILED\n return CheckResult.PASSED\n # maybe its an network_acl_rule\n if conf.get(\"network_acl_id\"):\n if not conf.get(\"egress\")[0]:\n if not self.check_rule(conf):\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n return CheckResult.UNKNOWN\n\n def check_rule(self, rule):\n if rule.get('cidr_block'):\n if rule.get('cidr_block') == [\"0.0.0.0/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n if rule.get('ipv6_cidr_block'):\n if rule.get('ipv6_cidr_block') == [\"::/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n return True\n", "path": "checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py"}], "after_files": 
[{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.type_forcers import force_int\n\n\nclass AbsNACLUnrestrictedIngress(BaseResourceCheck):\n def __init__(self, check_id, port):\n name = \"Ensure no NACL allow ingress from 0.0.0.0:0 to port %d\" % port\n supported_resources = ['aws_network_acl', 'aws_network_acl_rule']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=check_id, categories=categories, supported_resources=supported_resources)\n self.port = port\n\n def scan_resource_conf(self, conf):\n \"\"\"\n\n Return PASS if:\n - The NACL doesnt allows restricted ingress access to the port\n - The resource is an aws_network_acl of type 'ingress' that does not violate the check.\n\n Return FAIL if:\n - The the NACL allows unrestricted access to the port\n\n Return UNKNOWN if:\n - the resource is an NACL of type 'egress', OR\n\n :param conf: aws_network_acl configuration\n :return: <CheckResult>\n \"\"\"\n\n if conf.get(\"ingress\"):\n ingress = conf.get(\"ingress\")\n for rule in ingress:\n if not self.check_rule(rule):\n return CheckResult.FAILED\n return CheckResult.PASSED\n # maybe its an network_acl_rule\n if conf.get(\"network_acl_id\"):\n if not conf.get(\"egress\")[0]:\n if not self.check_rule(conf):\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n return CheckResult.UNKNOWN\n\n def check_rule(self, rule):\n if rule.get('cidr_block'):\n if rule.get('cidr_block') == [\"0.0.0.0/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n protocol = rule.get('protocol')\n if protocol and str(protocol[0]) == \"-1\":\n return False\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n if rule.get('ipv6_cidr_block'):\n if rule.get('ipv6_cidr_block') == [\"::/0\"]:\n if rule.get('action') == [\"allow\"] or rule.get('rule_action') == [\"allow\"]:\n protocol = rule.get('protocol')\n if protocol and str(protocol[0]) == \"-1\":\n return False\n if int(rule.get('from_port')[0]) <= self.port <= int(rule.get('to_port')[0]):\n return False\n return True\n", "path": "checkov/terraform/checks/resource/aws/AbsNACLUnrestrictedIngress.py"}]}
| 1,987 | 312 |
| gh_patches_debug_27016 | rasdani/github-patches | git_diff | translate__translate-4062 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
3.0.0 release
I'd like to release 3.0.0 soon to get cleanups and fixes to users.
Any objections to doing that once remaining issues in the milestone https://github.com/translate/translate/milestone/25 are addressed?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 #
2 # Translate Toolkit documentation build configuration file, created by
3 # sphinx-quickstart on Mon Mar 26 23:48:04 2012.
4 #
5 # This file is execfile()d with the current directory set to its containing
6 # dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import os
15 import sys
16
17
18 # If extensions (or modules to document with autodoc) are in another directory,
19 # add these directories to sys.path here. If the directory is relative to the
20 # documentation root, use os.path.abspath to make it absolute, like shown here.
21 #sys.path.insert(0, os.path.abspath('.'))
22
23 sys.path.insert(0, os.path.abspath('_ext'))
24 sys.path.insert(0, os.path.abspath('.'))
25 sys.path.insert(0, os.path.abspath('..'))
26
27 # -- General configuration ----------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #needs_sphinx = '1.0'
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 'translate_docs',
37 'sphinx.ext.autodoc',
38 'sphinx.ext.coverage',
39 'sphinx.ext.extlinks',
40 'sphinx.ext.intersphinx',
41 'sphinx.ext.todo',
42 ]
43
44 # Display todo notes. See http://sphinx-doc.org/ext/todo.html#directive-todo
45 todo_include_todos=True
46
47 # Add any paths that contain templates here, relative to this directory.
48 templates_path = ['_templates']
49
50 # The suffix of source filenames.
51 source_suffix = '.rst'
52
53 # The encoding of source files.
54 #source_encoding = 'utf-8-sig'
55
56 # The master toctree document.
57 master_doc = 'index'
58
59 # General information about the project.
60 project = 'Translate Toolkit'
61 copyright = '2002-2020, Translate'
62
63 # The version info for the project you're documenting, acts as replacement for
64 # |version| and |release|, also used in various other places throughout the
65 # built documents.
66 #
67 # The short X.Y version.
68 version = '2.5.1'
69 # The full version, including alpha/beta/rc tags.
70 release = '2.5.1'
71
72 # The language for content autogenerated by Sphinx. Refer to documentation
73 # for a list of supported languages.
74 #language = None
75
76 # There are two options for replacing |today|: either, you set today to some
77 # non-false value, then it is used:
78 #today = ''
79 # Else, today_fmt is used as the format for a strftime call.
80 #today_fmt = '%B %d, %Y'
81
82 # List of patterns, relative to source directory, that match files and
83 # directories to ignore when looking for source files.
84 exclude_patterns = ['_build', '_themes/README.rst', 'releases/README.rst']
85
86 # The reST default role (used for this markup: `text`) to use for all
87 # documents.
88 #default_role = None
89
90 # If true, '()' will be appended to :func: etc. cross-reference text.
91 #add_function_parentheses = True
92
93 # If true, the current module name will be prepended to all description
94 # unit titles (such as .. function::).
95 #add_module_names = True
96
97 # If true, sectionauthor and moduleauthor directives will be shown in the
98 # output. They are ignored by default.
99 #show_authors = False
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = 'sphinx'
103
104 # A list of ignored prefixes for module index sorting.
105 #modindex_common_prefix = []
106
107 # -- Missing modules --------------------------------------------------
108
109 autodoc_mock_imports = [
110 'aeidon',
111 'BeautifulSoup',
112 'glib',
113 'gobject',
114 'gtk',
115 'iniparse',
116 'vobject',
117 ]
118
119 # -- Options for HTML output --------------------------------------------------
120
121 # The theme to use for HTML and HTML Help pages. See the documentation for
122 # a list of builtin themes.
123 html_theme = 'sphinx-bootstrap'
124
125 # Theme options are theme-specific and customize the look and feel of a theme
126 # further. For a list of options available for each theme, see the
127 # documentation.
128 html_theme_options = {
129 'nosidebar': True,
130 }
131
132 # Add any paths that contain custom themes here, relative to this directory.
133 html_theme_path = ['_themes']
134
135 # The name for this set of Sphinx documents. If None, it defaults to
136 # "<project> v<release> documentation".
137 #html_title = None
138
139 # A shorter title for the navigation bar. Default is the same as html_title.
140 #html_short_title = None
141
142 # The name of an image file (relative to this directory) to place at the top
143 # of the sidebar.
144 #html_logo = None
145
146 # The name of an image file (within the static path) to use as favicon of the
147 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
148 # pixels large.
149 #html_favicon = None
150
151 # Add any paths that contain custom static files (such as style sheets) here,
152 # relative to this directory. They are copied after the builtin static files,
153 # so a file named "default.css" will overwrite the builtin "default.css".
154 html_static_path = ['_static']
155
156 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
157 # using the given strftime format.
158 #html_last_updated_fmt = '%b %d, %Y'
159
160 # If true, SmartyPants will be used to convert quotes and dashes to
161 # typographically correct entities.
162 #html_use_smartypants = True
163
164 # Custom sidebar templates, maps document names to template names.
165 #html_sidebars = {}
166
167 # Additional templates that should be rendered to pages, maps page names to
168 # template names.
169 #html_additional_pages = {}
170
171 # If false, no module index is generated.
172 #html_domain_indices = True
173
174 # If false, no index is generated.
175 #html_use_index = True
176
177 # If true, the index is split into individual pages for each letter.
178 #html_split_index = False
179
180 # If true, links to the reST sources are added to the pages.
181 html_show_sourcelink = False
182
183 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
184 #html_show_sphinx = True
185
186 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
187 #html_show_copyright = True
188
189 # If true, an OpenSearch description file will be output, and all pages will
190 # contain a <link> tag referring to it. The value of this option must be the
191 # base URL from which the finished HTML is served.
192 #html_use_opensearch = ''
193
194 # This is the file name suffix for HTML files (e.g. ".xhtml").
195 #html_file_suffix = None
196
197 # Output file base name for HTML help builder.
198 htmlhelp_basename = 'TranslateToolkitdoc'
199
200
201 # -- Options for LaTeX output -------------------------------------------------
202
203 latex_elements = {
204 # The paper size ('letterpaper' or 'a4paper').
205 #'papersize': 'letterpaper',
206
207 # The font size ('10pt', '11pt' or '12pt').
208 #'pointsize': '10pt',
209
210 # Additional stuff for the LaTeX preamble.
211 #'preamble': '',
212 }
213
214 # Grouping the document tree into LaTeX files. List of tuples
215 # (source start file, target name, title, author, documentclass [howto/manual])
216 latex_documents = [
217 ('index', 'TranslateToolkit.tex', 'Translate Toolkit Documentation',
218 'Translate.org.za', 'manual'),
219 ]
220
221 # The name of an image file (relative to this directory) to place at the top of
222 # the title page.
223 #latex_logo = None
224
225 # For "manual" documents, if this is true, then toplevel headings are parts,
226 # not chapters.
227 #latex_use_parts = False
228
229 # If true, show page references after internal links.
230 #latex_show_pagerefs = False
231
232 # If true, show URL addresses after external links.
233 #latex_show_urls = False
234
235 # Documents to append as an appendix to all manuals.
236 #latex_appendices = []
237
238 # If false, no module index is generated.
239 #latex_domain_indices = True
240
241
242 # -- Options for manual page output -------------------------------------------
243
244 # One entry per manual page. List of tuples
245 # (source start file, name, description, authors, manual section).
246 man_pages = [
247 ('index', 'translatetoolkit', 'Translate Toolkit Documentation',
248 ['Translate.org.za'], 1)
249 ]
250
251 # If true, show URL addresses after external links.
252 #man_show_urls = False
253
254
255 # -- Options for Texinfo output -----------------------------------------------
256
257 # Grouping the document tree into Texinfo files. List of tuples
258 # (source start file, target name, title, author,
259 # dir menu entry, description, category)
260 texinfo_documents = [
261 ('index', 'TranslateToolkit', 'Translate Toolkit Documentation',
262 'Translate.org.za', 'TranslateToolkit', 'One line description of project.',
263 'Miscellaneous'),
264 ]
265
266 # Documents to append as an appendix to all manuals.
267 #texinfo_appendices = []
268
269 # If false, no module index is generated.
270 #texinfo_domain_indices = True
271
272 # How to display URL addresses: 'footnote', 'no', or 'inline'.
273 #texinfo_show_urls = 'footnote'
274
275
276 # -- Coverage checker options -------------------------------------------------
277
278 coverage_ignore_modules = []
279
280 coverage_ignore_functions = ['main']
281
282 coverage_ignore_classes = []
283
284 coverage_write_headline = False
285
286 # -- Options for Intersphinx -------------------------------------------------
287
288 intersphinx_mapping = {
289 'python': ('https://docs.python.org/3.8', None),
290 'pytest': ('https://docs.pytest.org/en/latest/', None),
291 'django': ('http://django.readthedocs.org/en/latest/', None),
292 'pootle': ('http://docs.translatehouse.org/projects/pootle/en/latest/', None),
293 'guide': ('http://docs.translatehouse.org/projects/localization-guide/en/latest/', None),
294 }
295
296
297 # -- Options for Exernal links -------------------------------------------------
298
299 extlinks = {
300 # :role: (URL, prefix)
301 'issue': ('https://github.com/translate/translate/issues/%s',
302 'issue '),
303 'man': ('http://linux.die.net/man/1/%s', ''),
304 'wiki': ('http://translate.sourceforge.net/wiki/%s', ''),
305 'wp': ('http://en.wikipedia.org/wiki/%s', ''),
306 }
307
308 # -- Options for Linkcheck -------------------------------------------------
309
310 # Add regex's here for links that should be ignored.
311 linkcheck_ignore = [
312 'http://your_server.com/filename.html', # Example URL
313 '.*localhost.*',
314 ]
315
```
Path: `translate/__version__.py`
Content:
```
1 #
2 # Copyright 2008-2017 Zuza Software Foundation
3 #
4 # This file is part of the Translate Toolkit.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, see <http://www.gnu.org/licenses/>.
18
19 """This file contains the version of the Translate Toolkit."""
20
21 build = 25010
22 """The build number is used by external users of the Translate Toolkit to
23 trigger refreshes. Thus increase the build number whenever changes are made to
24 code touching stats or quality checks. An increased build number will force a
25 toolkit user, like Pootle, to regenerate it's stored stats and check
26 results."""
27
28 sver = "2.5.1"
29 """Human readable version number. Used for version number display."""
30
31 ver = (2, 5, 1)
32 """Machine readable version number. Used by tools that need to adjust code
33 paths based on a Translate Toolkit release number."""
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -65,9 +65,9 @@
# built documents.
#
# The short X.Y version.
-version = '2.5.1'
+version = '3.0.0'
# The full version, including alpha/beta/rc tags.
-release = '2.5.1'
+release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/translate/__version__.py b/translate/__version__.py
--- a/translate/__version__.py
+++ b/translate/__version__.py
@@ -18,16 +18,16 @@
"""This file contains the version of the Translate Toolkit."""
-build = 25010
+build = 30000
"""The build number is used by external users of the Translate Toolkit to
trigger refreshes. Thus increase the build number whenever changes are made to
code touching stats or quality checks. An increased build number will force a
toolkit user, like Pootle, to regenerate it's stored stats and check
results."""
-sver = "2.5.1"
+sver = "3.0.0"
"""Human readable version number. Used for version number display."""
-ver = (2, 5, 1)
+ver = (3, 0, 0)
"""Machine readable version number. Used by tools that need to adjust code
paths based on a Translate Toolkit release number."""
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -65,9 +65,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '2.5.1'\n+version = '3.0.0'\n # The full version, including alpha/beta/rc tags.\n-release = '2.5.1'\n+release = '3.0.0'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/translate/__version__.py b/translate/__version__.py\n--- a/translate/__version__.py\n+++ b/translate/__version__.py\n@@ -18,16 +18,16 @@\n \n \"\"\"This file contains the version of the Translate Toolkit.\"\"\"\n \n-build = 25010\n+build = 30000\n \"\"\"The build number is used by external users of the Translate Toolkit to\n trigger refreshes. Thus increase the build number whenever changes are made to\n code touching stats or quality checks. An increased build number will force a\n toolkit user, like Pootle, to regenerate it's stored stats and check\n results.\"\"\"\n \n-sver = \"2.5.1\"\n+sver = \"3.0.0\"\n \"\"\"Human readable version number. Used for version number display.\"\"\"\n \n-ver = (2, 5, 1)\n+ver = (3, 0, 0)\n \"\"\"Machine readable version number. Used by tools that need to adjust code\n paths based on a Translate Toolkit release number.\"\"\"\n", "issue": "3.0.0 release\nI'd like to release 3.0.0 soon to get cleanups and fixes to users.\r\n\r\nAny objections to doing that once remaining issues in the milestone https://github.com/translate/translate/milestone/25 are addressed?\n", "before_files": [{"content": "#\n# Translate Toolkit documentation build configuration file, created by\n# sphinx-quickstart on Mon Mar 26 23:48:04 2012.\n#\n# This file is execfile()d with the current directory set to its containing\n# dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\nsys.path.insert(0, os.path.abspath('_ext'))\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'translate_docs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n]\n\n# Display todo notes. 
See http://sphinx-doc.org/ext/todo.html#directive-todo\ntodo_include_todos=True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Translate Toolkit'\ncopyright = '2002-2020, Translate'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '2.5.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '2.5.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_themes/README.rst', 'releases/README.rst']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# -- Missing modules --------------------------------------------------\n\nautodoc_mock_imports = [\n 'aeidon',\n 'BeautifulSoup',\n 'glib',\n 'gobject',\n 'gtk',\n 'iniparse',\n 'vobject',\n]\n\n# -- Options for HTML output --------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx-bootstrap'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'nosidebar': True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'TranslateToolkitdoc'\n\n\n# -- Options for LaTeX output -------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual])\nlatex_documents = [\n ('index', 'TranslateToolkit.tex', 'Translate Toolkit Documentation',\n 'Translate.org.za', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output -------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'translatetoolkit', 'Translate Toolkit Documentation',\n ['Translate.org.za'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'TranslateToolkit', 'Translate Toolkit Documentation',\n 'Translate.org.za', 'TranslateToolkit', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n\n# -- Coverage checker options -------------------------------------------------\n\ncoverage_ignore_modules = []\n\ncoverage_ignore_functions = ['main']\n\ncoverage_ignore_classes = []\n\ncoverage_write_headline = False\n\n# -- Options for Intersphinx -------------------------------------------------\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.8', None),\n 'pytest': ('https://docs.pytest.org/en/latest/', None),\n 'django': ('http://django.readthedocs.org/en/latest/', None),\n 'pootle': ('http://docs.translatehouse.org/projects/pootle/en/latest/', None),\n 'guide': ('http://docs.translatehouse.org/projects/localization-guide/en/latest/', None),\n}\n\n\n# -- Options for Exernal links -------------------------------------------------\n\nextlinks = {\n # :role: (URL, prefix)\n 'issue': ('https://github.com/translate/translate/issues/%s',\n 'issue '),\n 'man': ('http://linux.die.net/man/1/%s', ''),\n 'wiki': ('http://translate.sourceforge.net/wiki/%s', ''),\n 'wp': ('http://en.wikipedia.org/wiki/%s', ''),\n}\n\n# -- Options for Linkcheck -------------------------------------------------\n\n# Add regex's here for links that should be ignored.\nlinkcheck_ignore = [\n 'http://your_server.com/filename.html', # Example URL\n '.*localhost.*',\n]\n", "path": "docs/conf.py"}, {"content": "#\n# Copyright 2008-2017 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"This file contains the version of the Translate Toolkit.\"\"\"\n\nbuild = 25010\n\"\"\"The build number is used by external users of the Translate Toolkit to\ntrigger refreshes. Thus increase the build number whenever changes are made to\ncode touching stats or quality checks. An increased build number will force a\ntoolkit user, like Pootle, to regenerate it's stored stats and check\nresults.\"\"\"\n\nsver = \"2.5.1\"\n\"\"\"Human readable version number. Used for version number display.\"\"\"\n\nver = (2, 5, 1)\n\"\"\"Machine readable version number. 
Used by tools that need to adjust code\npaths based on a Translate Toolkit release number.\"\"\"\n", "path": "translate/__version__.py"}], "after_files": [{"content": "#\n# Translate Toolkit documentation build configuration file, created by\n# sphinx-quickstart on Mon Mar 26 23:48:04 2012.\n#\n# This file is execfile()d with the current directory set to its containing\n# dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\nsys.path.insert(0, os.path.abspath('_ext'))\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration ----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'translate_docs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n]\n\n# Display todo notes. See http://sphinx-doc.org/ext/todo.html#directive-todo\ntodo_include_todos=True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Translate Toolkit'\ncopyright = '2002-2020, Translate'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '3.0.0'\n# The full version, including alpha/beta/rc tags.\nrelease = '3.0.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_themes/README.rst', 'releases/README.rst']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# -- Missing modules --------------------------------------------------\n\nautodoc_mock_imports = [\n 'aeidon',\n 'BeautifulSoup',\n 'glib',\n 'gobject',\n 'gtk',\n 'iniparse',\n 'vobject',\n]\n\n# -- Options for HTML output --------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx-bootstrap'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'nosidebar': True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'TranslateToolkitdoc'\n\n\n# -- Options for LaTeX output -------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual])\nlatex_documents = [\n ('index', 'TranslateToolkit.tex', 'Translate Toolkit Documentation',\n 'Translate.org.za', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output -------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'translatetoolkit', 'Translate Toolkit Documentation',\n ['Translate.org.za'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'TranslateToolkit', 'Translate Toolkit Documentation',\n 'Translate.org.za', 'TranslateToolkit', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n\n# -- Coverage checker options -------------------------------------------------\n\ncoverage_ignore_modules = []\n\ncoverage_ignore_functions = ['main']\n\ncoverage_ignore_classes = []\n\ncoverage_write_headline = False\n\n# -- Options for Intersphinx -------------------------------------------------\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.8', None),\n 'pytest': ('https://docs.pytest.org/en/latest/', None),\n 'django': ('http://django.readthedocs.org/en/latest/', None),\n 'pootle': ('http://docs.translatehouse.org/projects/pootle/en/latest/', None),\n 'guide': ('http://docs.translatehouse.org/projects/localization-guide/en/latest/', None),\n}\n\n\n# -- Options for Exernal links -------------------------------------------------\n\nextlinks = {\n # :role: (URL, prefix)\n 'issue': ('https://github.com/translate/translate/issues/%s',\n 'issue '),\n 'man': ('http://linux.die.net/man/1/%s', ''),\n 'wiki': ('http://translate.sourceforge.net/wiki/%s', ''),\n 'wp': ('http://en.wikipedia.org/wiki/%s', ''),\n}\n\n# -- Options for Linkcheck -------------------------------------------------\n\n# Add regex's here for links that should be ignored.\nlinkcheck_ignore = [\n 'http://your_server.com/filename.html', # Example URL\n '.*localhost.*',\n]\n", "path": "docs/conf.py"}, {"content": "#\n# Copyright 2008-2017 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"This file contains the version of the Translate Toolkit.\"\"\"\n\nbuild = 30000\n\"\"\"The build number is used by external users of the Translate Toolkit to\ntrigger refreshes. Thus increase the build number whenever changes are made to\ncode touching stats or quality checks. An increased build number will force a\ntoolkit user, like Pootle, to regenerate it's stored stats and check\nresults.\"\"\"\n\nsver = \"3.0.0\"\n\"\"\"Human readable version number. Used for version number display.\"\"\"\n\nver = (3, 0, 0)\n\"\"\"Machine readable version number. Used by tools that need to adjust code\npaths based on a Translate Toolkit release number.\"\"\"\n", "path": "translate/__version__.py"}]}
| 3,952 | 349 |
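The fix in this record is nothing but constant bumps spread across two files, which is exactly the kind of change that drifts out of sync. As a brief illustration (not part of the record or the upstream repository), a small check like the following verifies that `docs/conf.py` and `translate/__version__.py` agree after such a bump; it assumes the repository layout and the plain `name = 'value'` assignment style shown in the diff.

```python
# Sketch only: cross-check the version constants touched by the golden diff.
# Assumes it is run from the repository root of the layout shown above.
import ast
import re


def read_assignment(path: str, name: str) -> str:
    """Return the right-hand side of a top-level `name = ...` assignment."""
    with open(path, encoding="utf-8") as handle:
        source = handle.read()
    match = re.search(rf"^{name}\s*=\s*(.+)$", source, flags=re.MULTILINE)
    if match is None:
        raise ValueError(f"{name!r} not found in {path}")
    return match.group(1).strip()


def check_release_consistency() -> None:
    # The patch writes '3.0.0' into docs/conf.py and "3.0.0" / (3, 0, 0)
    # into translate/__version__.py; all four values must agree.
    docs_version = ast.literal_eval(read_assignment("docs/conf.py", "version"))
    docs_release = ast.literal_eval(read_assignment("docs/conf.py", "release"))
    sver = ast.literal_eval(read_assignment("translate/__version__.py", "sver"))
    ver = ast.literal_eval(read_assignment("translate/__version__.py", "ver"))

    assert docs_version == docs_release == sver, "version strings disagree"
    assert tuple(int(part) for part in sver.split(".")) == ver, "ver tuple is stale"


if __name__ == "__main__":
    check_release_consistency()
    print("docs/conf.py and translate/__version__.py agree")
```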
gh_patches_debug_987
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3070
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Progress bar is cool but...
It requires the terminal to be `reset` when run in a screen session. Also, for cactus anyway, it spends the vast majority of the runtime at 99%/100%.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-558)
┆Issue Number: TOIL-558
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (C) 2015-2016 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from setuptools import find_packages, setup
15 import os
16
17
18 def runSetup():
19 """
20 Calls setup(). This function exists so the setup() invocation preceded more internal
21 functionality. The `version` module is imported dynamically by importVersion() below.
22 """
23 boto = 'boto==2.48.0'
24 boto3 = 'boto3>=1.7.50, <2.0'
25 futures = 'futures==3.1.1'
26 pycryptodome = 'pycryptodome==3.5.1'
27 pymesos = 'pymesos==0.3.15'
28 psutil = 'psutil >= 3.0.1, <6'
29 pynacl = 'pynacl==1.3.0'
30 gcs = 'google-cloud-storage==1.6.0'
31 gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
32 apacheLibcloud = 'apache-libcloud==2.2.1'
33 cwltool = 'cwltool==3.0.20200324120055'
34 galaxyToolUtil = 'galaxy-tool-util'
35 htcondor = 'htcondor>=8.6.0'
36 kubernetes = 'kubernetes>=10, <11'
37 idna = 'idna>=2'
38 pytz = 'pytz>=2012'
39 dill = 'dill==0.3.1.1'
40 six = 'six>=1.10.0'
41 future = 'future'
42 requests = 'requests>=2, <3'
43 docker = 'docker==2.5.1'
44 dateutil = 'python-dateutil'
45 addict = 'addict<=2.2.0'
46 pathlib2 = 'pathlib2==2.3.2'
47 enlighten = 'enlighten>=1.5.1, <2'
48
49 core_reqs = [
50 dill,
51 six,
52 future,
53 requests,
54 docker,
55 dateutil,
56 psutil,
57 addict,
58 pathlib2,
59 pytz,
60 enlighten]
61
62 aws_reqs = [
63 boto,
64 boto3,
65 futures,
66 pycryptodome]
67 cwl_reqs = [
68 cwltool,
69 galaxyToolUtil]
70 encryption_reqs = [
71 pynacl]
72 google_reqs = [
73 gcs_oauth2_boto_plugin, # is this being used??
74 apacheLibcloud,
75 gcs]
76 htcondor_reqs = [
77 htcondor]
78 kubernetes_reqs = [
79 kubernetes,
80 idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.
81 mesos_reqs = [
82 pymesos,
83 psutil]
84 wdl_reqs = []
85
86
87 # htcondor is not supported by apple
88 # this is tricky to conditionally support in 'all' due
89 # to how wheels work, so it is not included in all and
90 # must be explicitly installed as an extra
91 all_reqs = \
92 aws_reqs + \
93 cwl_reqs + \
94 encryption_reqs + \
95 google_reqs + \
96 kubernetes_reqs + \
97 mesos_reqs
98
99
100 setup(
101 name='toil',
102 version=version.distVersion,
103 description='Pipeline management software for clusters.',
104 author='Benedict Paten',
105 author_email='[email protected]',
106 url="https://github.com/DataBiosphere/toil",
107 classifiers=[
108 'Development Status :: 5 - Production/Stable',
109 'Environment :: Console',
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Intended Audience :: Healthcare Industry',
113 'License :: OSI Approved :: Apache Software License',
114 'Natural Language :: English',
115 'Operating System :: MacOS :: MacOS X',
116 'Operating System :: POSIX',
117 'Operating System :: POSIX :: Linux',
118 'Programming Language :: Python :: 3.6',
119 'Topic :: Scientific/Engineering',
120 'Topic :: Scientific/Engineering :: Bio-Informatics',
121 'Topic :: Scientific/Engineering :: Astronomy',
122 'Topic :: Scientific/Engineering :: Atmospheric Science',
123 'Topic :: Scientific/Engineering :: Information Analysis',
124 'Topic :: Scientific/Engineering :: Medical Science Apps.',
125 'Topic :: System :: Distributed Computing',
126 'Topic :: Utilities'],
127 license="Apache License v2.0",
128 python_requires=">=3.6",
129 install_requires=core_reqs,
130 extras_require={
131 'aws': aws_reqs,
132 'cwl': cwl_reqs,
133 'encryption': encryption_reqs,
134 'google': google_reqs,
135 'htcondor:sys_platform!="darwin"': htcondor_reqs,
136 'kubernetes': kubernetes_reqs,
137 'mesos': mesos_reqs,
138 'wdl': wdl_reqs,
139 'all': all_reqs},
140 package_dir={'': 'src'},
141 packages=find_packages(where='src',
142 # Note that we intentionally include the top-level `test` package for
143 # functionality like the @experimental and @integrative decoratorss:
144 exclude=['*.test.*']),
145 package_data = {
146 '': ['*.yml', 'cloud-config'],
147 },
148 # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
149 # you can't just change them here. Luckily, most of them are pretty unique strings, and thus
150 # easy to search for.
151 entry_points={
152 'console_scripts': [
153 'toil = toil.utils.toilMain:main',
154 '_toil_worker = toil.worker:main',
155 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',
156 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
157 'toil-wdl-runner = toil.wdl.toilwdl:main',
158 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',
159 '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})
160
161
162 def importVersion():
163 """
164 Load and return the module object for src/toil/version.py, generating it from the template if
165 required.
166 """
167 import imp
168 try:
169 # Attempt to load the template first. It only exists in a working copy cloned via git.
170 import version_template
171 except ImportError:
172 # If loading the template fails we must be in a unpacked source distribution and
173 # src/toil/version.py will already exist.
174 pass
175 else:
176 # Use the template to generate src/toil/version.py
177 import os
178 import errno
179 from tempfile import NamedTemporaryFile
180
181 new = version_template.expand_()
182 try:
183 with open('src/toil/version.py') as f:
184 old = f.read()
185 except IOError as e:
186 if e.errno == errno.ENOENT:
187 old = None
188 else:
189 raise
190
191 if old != new:
192 with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:
193 f.write(new)
194 os.rename(f.name, 'src/toil/version.py')
195 # Unfortunately, we can't use a straight import here because that would also load the stuff
196 # defined in src/toil/__init__.py which imports modules from external dependencies that may
197 # yet to be installed when setup.py is invoked.
198 return imp.load_source('toil.version', 'src/toil/version.py')
199
200
201 version = importVersion()
202 runSetup()
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@
dateutil = 'python-dateutil'
addict = 'addict<=2.2.0'
pathlib2 = 'pathlib2==2.3.2'
- enlighten = 'enlighten>=1.5.1, <2'
+ enlighten = 'enlighten>=1.5.2, <2'
core_reqs = [
dill,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,7 @@\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n- enlighten = 'enlighten>=1.5.1, <2'\n+ enlighten = 'enlighten>=1.5.2, <2'\n \n core_reqs = [\n dill,\n", "issue": "Progress bar is cool but...\nIt requires the terminal to be `reset` when run in a screen session. Also, for cactus anyway, it spends the vast majority of the runtime at 99%/100%.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-558)\n\u2506Issue Number: TOIL-558\n\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20200324120055'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n dill = 'dill==0.3.1.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n enlighten = 'enlighten>=1.5.1, <2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pathlib2,\n pytz,\n enlighten]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: 
Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. 
It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. 
The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20200324120055'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n dill = 'dill==0.3.1.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n enlighten = 'enlighten>=1.5.2, <2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pathlib2,\n pytz,\n enlighten]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry 
points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py"}]}
| 2,676 | 118 |
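The golden diff here touches nothing but the dependency floor, raising `enlighten>=1.5.1` to `enlighten>=1.5.2`. The record does not show how the progress bar is driven, so the sketch below is only an illustration of how a caller can keep an enlighten bar from leaving a terminal in need of `reset`: enable it only on an interactive stream and always tear it down. It relies solely on enlighten's documented `get_manager()` / `counter()` interface; the job loop is a stand-in, not Toil's leader code.

```python
# Illustrative sketch, not Toil's implementation: guard an enlighten progress
# bar so redirected or captured output never receives terminal control codes.
import sys
import time

import enlighten


def run_jobs(total_jobs: int) -> None:
    show_bar = sys.stdout.isatty()  # skip the bar when output is not a terminal
    manager = enlighten.get_manager() if show_bar else None
    counter = (
        manager.counter(total=total_jobs, desc="Jobs", unit="jobs")
        if manager
        else None
    )
    try:
        for _ in range(total_jobs):
            time.sleep(0.01)  # placeholder for real work
            if counter:
                counter.update()
    finally:
        # Always restore the terminal, even on errors; skipping this step is
        # what produces the "terminal needs `reset`" symptom from the issue.
        if counter:
            counter.close()
        if manager:
            manager.stop()


if __name__ == "__main__":
    run_jobs(50)
```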
gh_patches_debug_32497
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1600
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: abfallwirtschaft_pforzheim_de has change the URL.
### I Have A Problem With:
A specific source
### What's Your Problem
The URL changes from "https://www.abfallwirtschaft-pforzheim.de/kundenportal/abfallkalender" to "https://www.abfallwirtschaft-pforzheim.de/abfallkalender". On the new Site you need to select a checkbox for the year. I think this option would disappear on the beginning of the next year. But the addon doesnt show me the calendar for 2023 anymore. Its complete empty.
### Source (if relevant)
abfallwirtschaft_pforzheim_de
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
abfallwirtschaft_pforzheim_de
```
### Checklist Source Error
- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [ ] Checked that the website of your service provider is still working
- [ ] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py`
Content:
```
1 from html.parser import HTMLParser
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 # Source code based on rh_entsorgung_de.md
8 TITLE = "Abfallwirtschaft Pforzheim"
9 DESCRIPTION = "Source for Abfallwirtschaft Pforzheim."
10 URL = "https://www.abfallwirtschaft-pforzheim.de"
11 TEST_CASES = {
12 "Abnobstraße": {
13 "street": "Abnobastraße",
14 "house_number": 3,
15 "address_suffix": "",
16 },
17 "Im Buchbusch": {
18 "street": "Im Buchbusch",
19 "house_number": 12,
20 },
21 "Eisenbahnstraße": {
22 "street": "Eisenbahnstraße",
23 "house_number": 29,
24 "address_suffix": "-33",
25 },
26 }
27
28 ICON_MAP = {
29 "Restmuell": "mdi:trash-can",
30 "Biobehaelter": "mdi:leaf",
31 "Papierbehaelter": "mdi:package-variant",
32 "Gelbe": "mdi:recycle",
33 "Grossmuellbehaelter": "mdi:delete-circle",
34 }
35
36
37 API_URL = "https://onlineservices.abfallwirtschaft-pforzheim.de/WasteManagementPforzheim/WasteManagementServlet"
38
39 # Parser for HTML input (hidden) text
40
41
42 class HiddenInputParser(HTMLParser):
43 def __init__(self):
44 super().__init__()
45 self._args = {}
46
47 @property
48 def args(self):
49 return self._args
50
51 def handle_starttag(self, tag, attrs):
52 if tag == "input":
53 d = dict(attrs)
54 if str(d["type"]).lower() == "hidden":
55 self._args[d["name"]] = d["value"] if "value" in d else ""
56
57
58 class Source:
59 def __init__(self, street: str, house_number: int, address_suffix: str = ""):
60 self._street = street
61 self._hnr = house_number
62 self._suffix = address_suffix
63 self._ics = ICS()
64
65 def fetch(self):
66 session = requests.session()
67
68 r = session.get(
69 API_URL,
70 params={"SubmitAction": "wasteDisposalServices",
71 "InFrameMode": "TRUE"},
72 )
73 r.raise_for_status()
74 r.encoding = "utf-8"
75
76 parser = HiddenInputParser()
77 parser.feed(r.text)
78
79 args = parser.args
80 args["Ort"] = self._street[0].upper()
81 args["Strasse"] = self._street
82 args["Hausnummer"] = str(self._hnr)
83 args["Hausnummerzusatz"] = self._suffix
84 args["SubmitAction"] = "CITYCHANGED"
85 r = session.post(
86 API_URL,
87 data=args,
88 )
89 r.raise_for_status()
90
91 args["SubmitAction"] = "forward"
92 args["ContainerGewaehltRM"] = "on"
93 args["ContainerGewaehltBM"] = "on"
94 args["ContainerGewaehltLVP"] = "on"
95 args["ContainerGewaehltPA"] = "on"
96 args["ContainerGewaehltPrMuell"] = "on"
97 r = session.post(
98 API_URL,
99 data=args,
100 )
101 r.raise_for_status()
102
103 args["ApplicationName"] = "com.athos.nl.mvc.abfterm.AbfuhrTerminModel"
104 args["SubmitAction"] = "filedownload_ICAL"
105
106 r = session.post(
107 API_URL,
108 data=args,
109 )
110 r.raise_for_status()
111
112 dates = self._ics.convert(r.text)
113
114 entries = []
115 for d in dates:
116 entries.append(
117 Collection(
118 d[0], d[1], ICON_MAP.get(d[1].split(" ")[0])
119 )
120 )
121 return entries
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py
@@ -1,3 +1,4 @@
+from datetime import datetime
from html.parser import HTMLParser
import requests
@@ -63,12 +64,21 @@
self._ics = ICS()
def fetch(self):
+ now = datetime.now()
+ entries = self.get_data(now.year)
+ if now.month == 12:
+ try:
+ entries += self.get_data(now.year + 1)
+ except Exception:
+ pass
+ return entries
+
+ def get_data(self, year):
session = requests.session()
r = session.get(
API_URL,
- params={"SubmitAction": "wasteDisposalServices",
- "InFrameMode": "TRUE"},
+ params={"SubmitAction": "wasteDisposalServices", "InFrameMode": "TRUE"},
)
r.raise_for_status()
r.encoding = "utf-8"
@@ -82,6 +92,7 @@
args["Hausnummer"] = str(self._hnr)
args["Hausnummerzusatz"] = self._suffix
args["SubmitAction"] = "CITYCHANGED"
+ args["Zeitraum"] = f"Jahresübersicht {year}"
r = session.post(
API_URL,
data=args,
@@ -113,9 +124,5 @@
entries = []
for d in dates:
- entries.append(
- Collection(
- d[0], d[1], ICON_MAP.get(d[1].split(" ")[0])
- )
- )
+ entries.append(Collection(d[0], d[1], ICON_MAP.get(d[1].split(" ")[0])))
return entries
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py\n@@ -1,3 +1,4 @@\n+from datetime import datetime\n from html.parser import HTMLParser\n \n import requests\n@@ -63,12 +64,21 @@\n self._ics = ICS()\n \n def fetch(self):\n+ now = datetime.now()\n+ entries = self.get_data(now.year)\n+ if now.month == 12:\n+ try:\n+ entries += self.get_data(now.year + 1)\n+ except Exception:\n+ pass\n+ return entries\n+\n+ def get_data(self, year):\n session = requests.session()\n \n r = session.get(\n API_URL,\n- params={\"SubmitAction\": \"wasteDisposalServices\",\n- \"InFrameMode\": \"TRUE\"},\n+ params={\"SubmitAction\": \"wasteDisposalServices\", \"InFrameMode\": \"TRUE\"},\n )\n r.raise_for_status()\n r.encoding = \"utf-8\"\n@@ -82,6 +92,7 @@\n args[\"Hausnummer\"] = str(self._hnr)\n args[\"Hausnummerzusatz\"] = self._suffix\n args[\"SubmitAction\"] = \"CITYCHANGED\"\n+ args[\"Zeitraum\"] = f\"Jahres\u00fcbersicht {year}\"\n r = session.post(\n API_URL,\n data=args,\n@@ -113,9 +124,5 @@\n \n entries = []\n for d in dates:\n- entries.append(\n- Collection(\n- d[0], d[1], ICON_MAP.get(d[1].split(\" \")[0])\n- )\n- )\n+ entries.append(Collection(d[0], d[1], ICON_MAP.get(d[1].split(\" \")[0])))\n return entries\n", "issue": "[Bug]: abfallwirtschaft_pforzheim_de has change the URL.\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe URL changes from \"https://www.abfallwirtschaft-pforzheim.de/kundenportal/abfallkalender\" to \"https://www.abfallwirtschaft-pforzheim.de/abfallkalender\". On the new Site you need to select a checkbox for the year. I think this option would disappear on the beginning of the next year. But the addon doesnt show me the calendar for 2023 anymore. 
Its complete empty.\n\n### Source (if relevant)\n\nabfallwirtschaft_pforzheim_de\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nabfallwirtschaft_pforzheim_de\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [ ] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\n# Source code based on rh_entsorgung_de.md\nTITLE = \"Abfallwirtschaft Pforzheim\"\nDESCRIPTION = \"Source for Abfallwirtschaft Pforzheim.\"\nURL = \"https://www.abfallwirtschaft-pforzheim.de\"\nTEST_CASES = {\n \"Abnobstra\u00dfe\": {\n \"street\": \"Abnobastra\u00dfe\",\n \"house_number\": 3,\n \"address_suffix\": \"\",\n },\n \"Im Buchbusch\": {\n \"street\": \"Im Buchbusch\",\n \"house_number\": 12,\n },\n \"Eisenbahnstra\u00dfe\": {\n \"street\": \"Eisenbahnstra\u00dfe\",\n \"house_number\": 29,\n \"address_suffix\": \"-33\",\n },\n}\n\nICON_MAP = {\n \"Restmuell\": \"mdi:trash-can\",\n \"Biobehaelter\": \"mdi:leaf\",\n \"Papierbehaelter\": \"mdi:package-variant\",\n \"Gelbe\": \"mdi:recycle\",\n \"Grossmuellbehaelter\": \"mdi:delete-circle\",\n}\n\n\nAPI_URL = \"https://onlineservices.abfallwirtschaft-pforzheim.de/WasteManagementPforzheim/WasteManagementServlet\"\n\n# Parser for HTML input (hidden) text\n\n\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if str(d[\"type\"]).lower() == \"hidden\":\n self._args[d[\"name\"]] = d[\"value\"] if \"value\" in d else \"\"\n\n\nclass Source:\n def __init__(self, street: str, house_number: int, address_suffix: str = \"\"):\n self._street = street\n self._hnr = house_number\n self._suffix = address_suffix\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n\n r = session.get(\n API_URL,\n params={\"SubmitAction\": \"wasteDisposalServices\",\n \"InFrameMode\": \"TRUE\"},\n )\n r.raise_for_status()\n r.encoding = \"utf-8\"\n\n parser = HiddenInputParser()\n parser.feed(r.text)\n\n args = parser.args\n args[\"Ort\"] = self._street[0].upper()\n args[\"Strasse\"] = self._street\n args[\"Hausnummer\"] = str(self._hnr)\n args[\"Hausnummerzusatz\"] = self._suffix\n args[\"SubmitAction\"] = \"CITYCHANGED\"\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n args[\"SubmitAction\"] = \"forward\"\n args[\"ContainerGewaehltRM\"] = \"on\"\n 
args[\"ContainerGewaehltBM\"] = \"on\"\n args[\"ContainerGewaehltLVP\"] = \"on\"\n args[\"ContainerGewaehltPA\"] = \"on\"\n args[\"ContainerGewaehltPrMuell\"] = \"on\"\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n args[\"ApplicationName\"] = \"com.athos.nl.mvc.abfterm.AbfuhrTerminModel\"\n args[\"SubmitAction\"] = \"filedownload_ICAL\"\n\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(\n Collection(\n d[0], d[1], ICON_MAP.get(d[1].split(\" \")[0])\n )\n )\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py"}], "after_files": [{"content": "from datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\n# Source code based on rh_entsorgung_de.md\nTITLE = \"Abfallwirtschaft Pforzheim\"\nDESCRIPTION = \"Source for Abfallwirtschaft Pforzheim.\"\nURL = \"https://www.abfallwirtschaft-pforzheim.de\"\nTEST_CASES = {\n \"Abnobstra\u00dfe\": {\n \"street\": \"Abnobastra\u00dfe\",\n \"house_number\": 3,\n \"address_suffix\": \"\",\n },\n \"Im Buchbusch\": {\n \"street\": \"Im Buchbusch\",\n \"house_number\": 12,\n },\n \"Eisenbahnstra\u00dfe\": {\n \"street\": \"Eisenbahnstra\u00dfe\",\n \"house_number\": 29,\n \"address_suffix\": \"-33\",\n },\n}\n\nICON_MAP = {\n \"Restmuell\": \"mdi:trash-can\",\n \"Biobehaelter\": \"mdi:leaf\",\n \"Papierbehaelter\": \"mdi:package-variant\",\n \"Gelbe\": \"mdi:recycle\",\n \"Grossmuellbehaelter\": \"mdi:delete-circle\",\n}\n\n\nAPI_URL = \"https://onlineservices.abfallwirtschaft-pforzheim.de/WasteManagementPforzheim/WasteManagementServlet\"\n\n# Parser for HTML input (hidden) text\n\n\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if str(d[\"type\"]).lower() == \"hidden\":\n self._args[d[\"name\"]] = d[\"value\"] if \"value\" in d else \"\"\n\n\nclass Source:\n def __init__(self, street: str, house_number: int, address_suffix: str = \"\"):\n self._street = street\n self._hnr = house_number\n self._suffix = address_suffix\n self._ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n entries = self.get_data(now.year)\n if now.month == 12:\n try:\n entries += self.get_data(now.year + 1)\n except Exception:\n pass\n return entries\n\n def get_data(self, year):\n session = requests.session()\n\n r = session.get(\n API_URL,\n params={\"SubmitAction\": \"wasteDisposalServices\", \"InFrameMode\": \"TRUE\"},\n )\n r.raise_for_status()\n r.encoding = \"utf-8\"\n\n parser = HiddenInputParser()\n parser.feed(r.text)\n\n args = parser.args\n args[\"Ort\"] = self._street[0].upper()\n args[\"Strasse\"] = self._street\n args[\"Hausnummer\"] = str(self._hnr)\n args[\"Hausnummerzusatz\"] = self._suffix\n args[\"SubmitAction\"] = \"CITYCHANGED\"\n args[\"Zeitraum\"] = f\"Jahres\u00fcbersicht {year}\"\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n args[\"SubmitAction\"] = \"forward\"\n args[\"ContainerGewaehltRM\"] = \"on\"\n args[\"ContainerGewaehltBM\"] = \"on\"\n args[\"ContainerGewaehltLVP\"] = \"on\"\n args[\"ContainerGewaehltPA\"] = \"on\"\n args[\"ContainerGewaehltPrMuell\"] = 
\"on\"\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n args[\"ApplicationName\"] = \"com.athos.nl.mvc.abfterm.AbfuhrTerminModel\"\n args[\"SubmitAction\"] = \"filedownload_ICAL\"\n\n r = session.post(\n API_URL,\n data=args,\n )\n r.raise_for_status()\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1], ICON_MAP.get(d[1].split(\" \")[0])))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/abfallwirtschaft_pforzheim_de.py"}]}
| 1,799 | 481 |
gh_patches_debug_32686 | rasdani/github-patches | git_diff | pwndbg__pwndbg-670 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect conversion from gdb.Value to python int
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
When debugging certain binaries, pwndbg may show an incorrect register value, missing the high 32 bits of a 64-bit register.
e.g.
```
pwndbg> regs rax
RAX 0x555c4160
pwndbg> i r rax
rax 0x5555555c4160 93824992690528
pwndbg> telescope $rax
<Could not read memory at 0x555c4160>
pwndbg> regs rsp
RSP 0xffffffffffffc2f0
pwndbg> i r rsp
rsp 0x7fffffffc2f0 0x7fffffffc2f0
pwndbg> telescope $rsp
<Could not read memory at 0xffffc2f0>
pwndbg>
```
**If I strip the binary, everything goes well.**
After checking code of pwndbg, I found that the bug happens when trying to convert a `gdb.Value` instance to python `int`.
```
pwndbg> python
>print(hex(pwndbg.regs.rsp))
>rsp = pwndbg.regs.value
>print(type(rsp))
>print(rsp.type)
>print(rsp)
>print(hex(rsp))
>print(int(rsp))
>print(long(rsp))
>print(str(rsp))
>print(hex(pwndbg.arch.ptrmask))
>print(hex(int(rsp) & pwndbg.arch.ptrmask))
>end
0xffffffffffffc2f0
<class 'gdb.Value'>
u64
140737488339696
0x7fffffffc2f0
-15632
-15632
140737488339696
0xffffffffffffffff
0xffffffffffffc2f0
```
It seems `int(value)` converts to a 32-bit C int instead of a Python int. Maybe we need to use `int(str(value))` to get the correct value.
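A minimal sketch of the two ideas that recover the full value, assuming a plain GDB Python session with a C program loaded: casting to an explicitly 64-bit unsigned type before converting, and the `str` round-trip suggested above.

```python
# Sketch only: run inside GDB's Python interpreter. gdb.parse_and_eval,
# gdb.lookup_type and Value.cast are part of the official GDB Python API.
import gdb

rsp = gdb.parse_and_eval('$rsp')                 # a gdb.Value
u64 = gdb.lookup_type('unsigned long long')      # pointer-sized on x86-64

via_cast = int(rsp.cast(u64))   # cast first, so no 32-bit truncation or sign-extension
via_str = int(str(rsp), 0)      # base 0 accepts both decimal and "0x..." forms

print(hex(via_cast), hex(via_str))
```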
### Steps to reproduce
<!--
What do we have to do to reproduce the problem?
If this is connected to particular C/asm code,
please provide the smallest C code that reproduces the issue.
-->
Here is an example binary which can reproduce the bug.
[babi.zip](https://github.com/pwndbg/pwndbg/files/3508243/babi.zip)
1. Execute `babi` with gdb & break at `0x5555555D0C97`.
1. Execute the attached `test.py`.
1. Breakpoint will hit with the bug.
*PS: stop in `libc` or `main` function won't see the bug.*
### My setup
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
```
$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 18.04.2 LTS
Release: 18.04
Codename: bionic
```
```
pwndbg> version
Gdb: 8.1.0.20180409-git
Python: 3.6.8 (default, Jan 14 2019, 11:02:34) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]
Pwndbg: 1.1.0 build: 7f5d8e7
Capstone: 4.0.1024
Unicorn: 1.0.1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/inthook.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 This hook is necessary for compatibility with Python2.7 versions of GDB
5 since they cannot directly cast to integer a gdb.Value object that is
6 not already an integer type.
7 """
8 from __future__ import absolute_import
9 from __future__ import division
10 from __future__ import print_function
11 from __future__ import unicode_literals
12
13 import enum
14 import os
15
16 import gdb
17 import six
18 from future.utils import with_metaclass
19
20 import pwndbg.typeinfo
21
22 if six.PY2:
23 import __builtin__ as builtins
24 else:
25 import builtins
26
27 _int = builtins.int
28
29
30 # We need this class to get isinstance(7, xint) to return True
31 class IsAnInt(type):
32 def __instancecheck__(self, other):
33 return isinstance(other, _int)
34
35
36 class xint(with_metaclass(IsAnInt, builtins.int)):
37 def __new__(cls, value, *a, **kw):
38 if isinstance(value, gdb.Value):
39 if pwndbg.typeinfo.is_pointer(value):
40 value = value.cast(pwndbg.typeinfo.ulong)
41 else:
42 value = value.cast(pwndbg.typeinfo.long)
43
44 elif isinstance(value, gdb.Symbol):
45 symbol = value
46 value = symbol.value()
47 if symbol.is_function:
48 value = value.cast(pwndbg.typeinfo.ulong)
49
50 elif not isinstance(value, (six.string_types, six.integer_types)) \
51 or isinstance(cls, enum.EnumMeta):
52 # without check for EnumMeta math operations with enums were failing e.g.:
53 # pwndbg> py import re; flags = 1 | re.MULTILINE
54 return _int.__new__(cls, value, *a, **kw)
55
56 return _int(_int(value, *a, **kw))
57
58 # Do not hook 'int' if we are just generating documentation
59 if os.environ.get('SPHINX', None) is None:
60 builtins.int = xint
61 globals()['int'] = xint
62 if six.PY3:
63 builtins.long = xint
64 globals()['long'] = xint
65
```
Path: `pwndbg/typeinfo.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Common types, and routines for manually loading types from file
5 via GCC.
6 """
7 from __future__ import absolute_import
8 from __future__ import division
9 from __future__ import print_function
10 from __future__ import unicode_literals
11
12 import glob
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 import gdb
19
20 import pwndbg.events
21 import pwndbg.gcc
22 import pwndbg.memoize
23
24 module = sys.modules[__name__]
25
26
27 def is_pointer(value):
28 type = value
29
30 if isinstance(value, gdb.Value):
31 type = value.type
32
33 type = type.strip_typedefs()
34 return type.code == gdb.TYPE_CODE_PTR
35
36
37 def lookup_types(*types):
38 for type_str in types:
39 try:
40 return gdb.lookup_type(type_str)
41 except Exception as e:
42 exc = e
43 raise exc
44
45
46 @pwndbg.events.new_objfile
47 @pwndbg.events.start
48 @pwndbg.events.stop
49 def update():
50
51 module.char = gdb.lookup_type('char')
52 module.ulong = lookup_types('unsigned long', 'uint', 'u32', 'uint32')
53 module.long = lookup_types('long', 'int', 'i32', 'int32')
54 module.uchar = lookup_types('unsigned char', 'ubyte', 'u8', 'uint8')
55 module.ushort = lookup_types('unsigned short', 'ushort', 'u16', 'uint16')
56 module.uint = lookup_types('unsigned int', 'uint', 'u32', 'uint32')
57 module.void = lookup_types('void', '()')
58 module.uint8 = module.uchar
59 module.uint16 = module.ushort
60 module.uint32 = module.uint
61 module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64', 'uint64')
62
63 module.int8 = lookup_types('char', 'i8', 'int8')
64 module.int16 = lookup_types('short', 'i16', 'int16')
65 module.int32 = lookup_types('int', 'i32', 'int32')
66 module.int64 = lookup_types('long long', 'long', 'i64', 'int64')
67
68 module.ssize_t = module.long
69 module.size_t = module.ulong
70
71 module.pvoid = void.pointer()
72 module.ppvoid = pvoid.pointer()
73 module.pchar = char.pointer()
74
75 module.ptrsize = pvoid.sizeof
76
77 if pvoid.sizeof == 4: module.ptrdiff = uint32
78 if pvoid.sizeof == 8: module.ptrdiff = uint64
79
80 module.null = gdb.Value(0).cast(void)
81
82 # Call it once so we load all of the types
83 update()
84
85 tempdir = tempfile.gettempdir() + '/pwndbg'
86 if not os.path.exists(tempdir):
87 os.mkdir(tempdir)
88
89 # Trial and error until things work
90 blacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',
91 'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',
92 'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',
93 'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',
94 'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',
95 'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',
96 'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',
97 'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',
98 'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',
99 'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']
100
101 def load(name):
102 """Load symbol by name from headers in standard system include directory"""
103 try:
104 return gdb.lookup_type(name)
105 except gdb.error:
106 pass
107
108 # s, _ = gdb.lookup_symbol(name)
109
110 # Try to find an architecture-specific include path
111 arch = pwndbg.arch.current.split(':')[0]
112
113 include_dir = glob.glob('/usr/%s*/include' % arch)
114
115 if include_dir:
116 include_dir = include_dir[0]
117 else:
118 include_dir = '/usr/include'
119
120 source = '#include <fstream>\n'
121
122 for subdir in ['', 'sys', 'netinet']:
123 dirname = os.path.join(include_dir, subdir)
124 for path in glob.glob(os.path.join(dirname, '*.h')):
125 if any(b in path for b in blacklist):
126 continue
127 print(path)
128 source += '#include "%s"\n' % path
129
130
131 source += '''
132 {name} foo;
133 '''.format(**locals())
134
135 filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))
136
137 with open(filename, 'w+') as f:
138 f.write(source)
139 f.flush()
140 os.fsync(f.fileno())
141
142 compile(filename)
143
144 return gdb.lookup_type(name)
145
146 def compile(filename=None, address=0):
147 """Compile and extract symbols from specified file"""
148 if filename is None:
149 print("Specify a filename to compile.")
150 return
151
152 objectname = os.path.splitext(filename)[0] + ".o"
153
154 if not os.path.exists(objectname):
155 gcc = pwndbg.gcc.which()
156 gcc += ['-w', '-c', '-g', filename, '-o', objectname]
157 try:
158 subprocess.check_output(gcc)
159 except subprocess.CalledProcessError as e:
160 return
161
162 add_symbol_file(objectname, address)
163
164 def add_symbol_file(filename=None, address=0):
165 """Read additional symbol table information from the object file filename"""
166 if filename is None:
167 print("Specify a symbol file to add.")
168 return
169
170 with pwndbg.events.Pause():
171 gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)
172
173 def read_gdbvalue(type_name, addr):
174 """ Read the memory contents at addr and interpret them as a GDB value with the given type """
175 gdb_type = pwndbg.typeinfo.load(type_name)
176 return gdb.Value(addr).cast(gdb_type.pointer()).dereference()
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/inthook.py b/pwndbg/inthook.py
--- a/pwndbg/inthook.py
+++ b/pwndbg/inthook.py
@@ -37,15 +37,15 @@
def __new__(cls, value, *a, **kw):
if isinstance(value, gdb.Value):
if pwndbg.typeinfo.is_pointer(value):
- value = value.cast(pwndbg.typeinfo.ulong)
+ value = value.cast(pwndbg.typeinfo.size_t)
else:
- value = value.cast(pwndbg.typeinfo.long)
+ value = value.cast(pwndbg.typeinfo.ssize_t)
elif isinstance(value, gdb.Symbol):
symbol = value
value = symbol.value()
if symbol.is_function:
- value = value.cast(pwndbg.typeinfo.ulong)
+ value = value.cast(pwndbg.typeinfo.size_t)
elif not isinstance(value, (six.string_types, six.integer_types)) \
or isinstance(cls, enum.EnumMeta):
diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py
--- a/pwndbg/typeinfo.py
+++ b/pwndbg/typeinfo.py
@@ -65,18 +65,22 @@
module.int32 = lookup_types('int', 'i32', 'int32')
module.int64 = lookup_types('long long', 'long', 'i64', 'int64')
- module.ssize_t = module.long
- module.size_t = module.ulong
-
module.pvoid = void.pointer()
module.ppvoid = pvoid.pointer()
module.pchar = char.pointer()
module.ptrsize = pvoid.sizeof
- if pvoid.sizeof == 4: module.ptrdiff = uint32
- if pvoid.sizeof == 8: module.ptrdiff = uint64
-
+ if pvoid.sizeof == 4:
+ module.ptrdiff = module.uint32
+ module.size_t = module.uint32
+ module.ssize_t = module.int32
+ elif pvoid.sizeof == 8:
+ module.ptrdiff = module.uint64
+ module.size_t = module.uint64
+ module.ssize_t = module.int64
+ else:
+ raise Exception('Pointer size not supported')
module.null = gdb.Value(0).cast(void)
# Call it once so we load all of the types
|
{"golden_diff": "diff --git a/pwndbg/inthook.py b/pwndbg/inthook.py\n--- a/pwndbg/inthook.py\n+++ b/pwndbg/inthook.py\n@@ -37,15 +37,15 @@\n def __new__(cls, value, *a, **kw):\n if isinstance(value, gdb.Value):\n if pwndbg.typeinfo.is_pointer(value):\n- value = value.cast(pwndbg.typeinfo.ulong)\n+ value = value.cast(pwndbg.typeinfo.size_t)\n else:\n- value = value.cast(pwndbg.typeinfo.long)\n+ value = value.cast(pwndbg.typeinfo.ssize_t)\n \n elif isinstance(value, gdb.Symbol):\n symbol = value\n value = symbol.value()\n if symbol.is_function:\n- value = value.cast(pwndbg.typeinfo.ulong)\n+ value = value.cast(pwndbg.typeinfo.size_t)\n \n elif not isinstance(value, (six.string_types, six.integer_types)) \\\n or isinstance(cls, enum.EnumMeta):\ndiff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py\n--- a/pwndbg/typeinfo.py\n+++ b/pwndbg/typeinfo.py\n@@ -65,18 +65,22 @@\n module.int32 = lookup_types('int', 'i32', 'int32')\n module.int64 = lookup_types('long long', 'long', 'i64', 'int64')\n \n- module.ssize_t = module.long\n- module.size_t = module.ulong\n-\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n \n module.ptrsize = pvoid.sizeof\n \n- if pvoid.sizeof == 4: module.ptrdiff = uint32\n- if pvoid.sizeof == 8: module.ptrdiff = uint64\n-\n+ if pvoid.sizeof == 4: \n+ module.ptrdiff = module.uint32\n+ module.size_t = module.uint32\n+ module.ssize_t = module.int32\n+ elif pvoid.sizeof == 8: \n+ module.ptrdiff = module.uint64\n+ module.size_t = module.uint64\n+ module.ssize_t = module.int64\n+ else:\n+ raise Exception('Pointer size not supported')\n module.null = gdb.Value(0).cast(void)\n \n # Call it once so we load all of the types\n", "issue": "Incorrect conversion from gdb.Value to python int\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\n\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\nWhen debugging certain binary, pwndbg may show incorrect register value, missing high 32 bit for 64 bit register.\r\ne.g.\r\n\r\n```\r\npwndbg> regs rax\r\n RAX 0x555c4160\r\npwndbg> i r rax\r\nrax 0x5555555c4160 93824992690528\r\npwndbg> telescope $rax\r\n<Could not read memory at 0x555c4160>\r\npwndbg> regs rsp\r\n RSP 0xffffffffffffc2f0\r\npwndbg> i r rsp\r\nrsp 0x7fffffffc2f0 0x7fffffffc2f0\r\npwndbg> telescope $rsp\r\n<Could not read memory at 0xffffc2f0>\r\npwndbg>\r\n```\r\n\r\n**If I strip the binary, everything goes well.**\r\n\r\nAfter checking code of pwndbg, I found that the bug happens when trying to convert a `gdb.Value` instance to python `int`.\r\n\r\n```\r\npwndbg> python\r\n>print(hex(pwndbg.regs.rsp))\r\n>rsp = pwndbg.regs.value\r\n>print(type(rsp))\r\n>print(rsp.type)\r\n>print(rsp)\r\n>print(hex(rsp))\r\n>print(int(rsp))\r\n>print(long(rsp))\r\n>print(str(rsp))\r\n>print(hex(pwndbg.arch.ptrmask))\r\n>print(hex(int(rsp) & pwndbg.arch.ptrmask))\r\n>end\r\n0xffffffffffffc2f0\r\n<class 'gdb.Value'>\r\nu64\r\n140737488339696\r\n0x7fffffffc2f0\r\n-15632\r\n-15632\r\n140737488339696\r\n0xffffffffffffffff\r\n0xffffffffffffc2f0\r\n```\r\n\r\nSeems the `int(value)` works as converting to 32 bit c type int instead of python type int. 
Maybe we need to use `int(str(value))` to get the correct value.\r\n\r\n### Steps to reproduce\r\n\r\n<!--\r\nWhat do we have to do to reproduce the problem?\r\nIf this is connected to particular C/asm code, \r\nplease provide the smallest C code that reproduces the issue.\r\n-->\r\n\r\nHere is an example binary which can reproduce the bug.\r\n\r\n[babi.zip](https://github.com/pwndbg/pwndbg/files/3508243/babi.zip)\r\n\r\n1. Execute `babi` with gdb & break at `0x5555555D0C97`.\r\n1. Execute the attached `test.py`.\r\n1. Breakpoint will hit with the bug.\r\n\r\n*PS: stop in `libc` or `main` function won't see the bug.*\r\n\r\n### My setup\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n\r\n```\r\n$ lsb_release -a\r\nNo LSB modules are available.\r\nDistributor ID: Ubuntu\r\nDescription: Ubuntu 18.04.2 LTS\r\nRelease: 18.04\r\nCodename: bionic\r\n```\r\n\r\n```\r\npwndbg> version\r\nGdb: 8.1.0.20180409-git\r\nPython: 3.6.8 (default, Jan 14 2019, 11:02:34) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]\r\nPwndbg: 1.1.0 build: 7f5d8e7\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis hook is necessary for compatibility with Python2.7 versions of GDB\nsince they cannot directly cast to integer a gdb.Value object that is\nnot already an integer type.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport enum\nimport os\n\nimport gdb\nimport six\nfrom future.utils import with_metaclass\n\nimport pwndbg.typeinfo\n\nif six.PY2:\n import __builtin__ as builtins\nelse:\n import builtins\n\n_int = builtins.int\n\n\n# We need this class to get isinstance(7, xint) to return True\nclass IsAnInt(type):\n def __instancecheck__(self, other):\n return isinstance(other, _int)\n\n\nclass xint(with_metaclass(IsAnInt, builtins.int)):\n def __new__(cls, value, *a, **kw):\n if isinstance(value, gdb.Value):\n if pwndbg.typeinfo.is_pointer(value):\n value = value.cast(pwndbg.typeinfo.ulong)\n else:\n value = value.cast(pwndbg.typeinfo.long)\n\n elif isinstance(value, gdb.Symbol):\n symbol = value\n value = symbol.value()\n if symbol.is_function:\n value = value.cast(pwndbg.typeinfo.ulong)\n\n elif not isinstance(value, (six.string_types, six.integer_types)) \\\n or isinstance(cls, enum.EnumMeta):\n # without check for EnumMeta math operations with enums were failing e.g.:\n # pwndbg> py import re; flags = 1 | re.MULTILINE\n return _int.__new__(cls, value, *a, **kw)\n\n return _int(_int(value, *a, **kw))\n\n# Do not hook 'int' if we are just generating documentation\nif os.environ.get('SPHINX', None) is None:\n builtins.int = xint\n globals()['int'] = xint\n if six.PY3:\n builtins.long = xint\n globals()['long'] = xint\n", "path": "pwndbg/inthook.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for 
manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]_objfile\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32', 'uint32')\n module.long = lookup_types('long', 'int', 'i32', 'int32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8', 'uint8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16', 'uint16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32', 'uint32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64', 'uint64')\n\n module.int8 = lookup_types('char', 'i8', 'int8')\n module.int16 = lookup_types('short', 'i16', 'int16')\n module.int32 = lookup_types('int', 'i32', 'int32')\n module.int64 = lookup_types('long long', 'long', 'i64', 'int64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in 
glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis hook is necessary for compatibility with Python2.7 versions of GDB\nsince they cannot directly cast to integer a gdb.Value object that is\nnot already an integer type.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport enum\nimport os\n\nimport gdb\nimport six\nfrom future.utils import with_metaclass\n\nimport pwndbg.typeinfo\n\nif six.PY2:\n import __builtin__ as builtins\nelse:\n import builtins\n\n_int = builtins.int\n\n\n# We need this class to get isinstance(7, xint) to return True\nclass IsAnInt(type):\n def __instancecheck__(self, other):\n return isinstance(other, _int)\n\n\nclass xint(with_metaclass(IsAnInt, builtins.int)):\n def __new__(cls, value, *a, **kw):\n if isinstance(value, gdb.Value):\n if pwndbg.typeinfo.is_pointer(value):\n value = value.cast(pwndbg.typeinfo.size_t)\n else:\n value = value.cast(pwndbg.typeinfo.ssize_t)\n\n elif isinstance(value, gdb.Symbol):\n symbol = value\n value = symbol.value()\n if symbol.is_function:\n value = value.cast(pwndbg.typeinfo.size_t)\n\n elif not isinstance(value, (six.string_types, six.integer_types)) \\\n or isinstance(cls, enum.EnumMeta):\n # without check for EnumMeta math operations with enums were failing e.g.:\n # pwndbg> py import re; flags = 1 | re.MULTILINE\n return _int.__new__(cls, value, *a, **kw)\n\n return _int(_int(value, *a, **kw))\n\n# Do not hook 'int' if we are just generating documentation\nif os.environ.get('SPHINX', None) is None:\n builtins.int = xint\n globals()['int'] = xint\n if six.PY3:\n builtins.long = xint\n globals()['long'] = xint\n", "path": "pwndbg/inthook.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom 
__future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]_objfile\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32', 'uint32')\n module.long = lookup_types('long', 'int', 'i32', 'int32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8', 'uint8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16', 'uint16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32', 'uint32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64', 'uint64')\n\n module.int8 = lookup_types('char', 'i8', 'int8')\n module.int16 = lookup_types('short', 'i16', 'int16')\n module.int32 = lookup_types('int', 'i32', 'int32')\n module.int64 = lookup_types('long long', 'long', 'i64', 'int64')\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: \n module.ptrdiff = module.uint32\n module.size_t = module.uint32\n module.ssize_t = module.int32\n elif pvoid.sizeof == 8: \n module.ptrdiff = module.uint64\n module.size_t = module.uint64\n module.ssize_t = module.int64\n else:\n raise Exception('Pointer size not supported')\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in 
glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py"}]}
| 3,908 | 555 |
gh_patches_debug_16795
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3042
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[chatgpt] change critic input as state
> ## 📌 Checklist before creating the PR
> * [x] I have created an issue for this PR for traceability
> * [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
> * [ ] I have added relevant tags if possible for us to better distinguish different PRs
>
> ## 🚨 Issue number
> > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
> > e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
> > fixed #3042
>
> ## 📝 What does this PR do?
> > Summarize your work here.
> > if you have any plots/diagrams/screenshots/tables, please attach them here.
>
> This commit fixes the ChatGPT critic input to be the state, according to the A2C RL algorithm.
>
> ## 💥 Checklist before requesting a review
> * [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
> * [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
> * [x] I have performed a self-review of my code
> * [ ] I have added thorough tests.
> * [ ] I have added docstrings for all the functions/methods I implemented
>
> ## ⭐️ Do you enjoy contributing to Colossal-AI?
> * [x] 🌝 Yes, I do.
> * [ ] 🌚 No, I don't.
>
> Tell us more if you don't enjoy contributing to Colossal-AI.
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/chatgpt/models/base/critic.py`
Content:
```
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5
6 from ..lora import LoRAModule
7 from ..utils import masked_mean
8
9
10 class Critic(LoRAModule):
11 """
12 Critic model base class.
13
14 Args:
15 model (nn.Module): Critic model.
16 value_head (nn.Module): Value head to get value.
17 lora_rank (int): LoRA rank.
18 lora_train_bias (str): LoRA bias training mode.
19 """
20
21 def __init__(self,
22 model: nn.Module,
23 value_head: nn.Module,
24 lora_rank: int = 0,
25 lora_train_bias: str = 'none') -> None:
26
27 super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
28 self.model = model
29 self.value_head = value_head
30 self.convert_to_lora()
31
32 def forward(self,
33 sequences: torch.LongTensor,
34 action_mask: Optional[torch.Tensor] = None,
35 attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
36 outputs = self.model(sequences, attention_mask=attention_mask)
37 last_hidden_states = outputs['last_hidden_state']
38
39 values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]
40
41 if action_mask is not None:
42 num_actions = action_mask.size(1)
43 values = values[:, -num_actions:]
44 value = masked_mean(values, action_mask, dim=1)
45 return value
46 value = values.mean(dim=1).squeeze(1)
47 return value
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/applications/ChatGPT/chatgpt/models/base/critic.py b/applications/ChatGPT/chatgpt/models/base/critic.py
--- a/applications/ChatGPT/chatgpt/models/base/critic.py
+++ b/applications/ChatGPT/chatgpt/models/base/critic.py
@@ -36,12 +36,15 @@
outputs = self.model(sequences, attention_mask=attention_mask)
last_hidden_states = outputs['last_hidden_state']
- values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]
+ values = self.value_head(last_hidden_states).squeeze(-1)
if action_mask is not None:
num_actions = action_mask.size(1)
- values = values[:, -num_actions:]
- value = masked_mean(values, action_mask, dim=1)
+ prompt_mask = attention_mask[:, :-num_actions]
+ values = values[:, :-num_actions]
+ value = masked_mean(values, prompt_mask, dim=1)
return value
+
+ values = values[:, :-1]
value = values.mean(dim=1).squeeze(1)
return value
|
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/base/critic.py b/applications/ChatGPT/chatgpt/models/base/critic.py\n--- a/applications/ChatGPT/chatgpt/models/base/critic.py\n+++ b/applications/ChatGPT/chatgpt/models/base/critic.py\n@@ -36,12 +36,15 @@\n outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n \n- values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]\n+ values = self.value_head(last_hidden_states).squeeze(-1)\n \n if action_mask is not None:\n num_actions = action_mask.size(1)\n- values = values[:, -num_actions:]\n- value = masked_mean(values, action_mask, dim=1)\n+ prompt_mask = attention_mask[:, :-num_actions]\n+ values = values[:, :-num_actions]\n+ value = masked_mean(values, prompt_mask, dim=1)\n return value\n+\n+ values = values[:, :-1]\n value = values.mean(dim=1).squeeze(1)\n return value\n", "issue": "[chatgpt] change critic input as state\n> ## \ud83d\udccc Checklist before creating the PR\r\n> * [x] I have created an issue for this PR for traceability\r\n> * [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`\r\n> * [ ] I have added relevant tags if possible for us to better distinguish different PRs\r\n> \r\n> ## \ud83d\udea8 Issue number\r\n> > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge\r\n> > e.g. `fixed #1234`, `closed #1234`, `resolved #1234`\r\n> > fixed #3042\r\n> \r\n> ## \ud83d\udcdd What does this PR do?\r\n> > Summarize your work here.\r\n> > if you have any plots/diagrams/screenshots/tables, please attach them here.\r\n> \r\n> This commit fix chatgpt critic input as state according to A2C RL algorithm.\r\n> \r\n> ## \ud83d\udca5 Checklist before requesting a review\r\n> * [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))\r\n> * [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible\r\n> * [x] I have performed a self-review of my code\r\n> * [ ] I have added thorough tests.\r\n> * [ ] I have added docstrings for all the functions/methods I implemented\r\n> \r\n> ## \u2b50\ufe0f Do you enjoy contributing to Colossal-AI?\r\n> * [x] \ud83c\udf1d Yes, I do.\r\n> * [ ] \ud83c\udf1a No, I don't.\r\n> \r\n> Tell us more if you don't enjoy contributing to Colossal-AI.\r\n\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom ..lora import LoRAModule\nfrom ..utils import masked_mean\n\n\nclass Critic(LoRAModule):\n \"\"\"\n Critic model base class.\n\n Args:\n model (nn.Module): Critic model.\n value_head (nn.Module): Value head to get value.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n value_head: nn.Module,\n lora_rank: int = 0,\n lora_train_bias: str = 'none') -> None:\n\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.model = model\n self.value_head = value_head\n self.convert_to_lora()\n\n def forward(self,\n sequences: torch.LongTensor,\n action_mask: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n\n values = 
self.value_head(last_hidden_states).squeeze(-1)[:, :-1]\n\n if action_mask is not None:\n num_actions = action_mask.size(1)\n values = values[:, -num_actions:]\n value = masked_mean(values, action_mask, dim=1)\n return value\n value = values.mean(dim=1).squeeze(1)\n return value\n", "path": "applications/ChatGPT/chatgpt/models/base/critic.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom ..lora import LoRAModule\nfrom ..utils import masked_mean\n\n\nclass Critic(LoRAModule):\n \"\"\"\n Critic model base class.\n\n Args:\n model (nn.Module): Critic model.\n value_head (nn.Module): Value head to get value.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n value_head: nn.Module,\n lora_rank: int = 0,\n lora_train_bias: str = 'none') -> None:\n\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.model = model\n self.value_head = value_head\n self.convert_to_lora()\n\n def forward(self,\n sequences: torch.LongTensor,\n action_mask: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n\n values = self.value_head(last_hidden_states).squeeze(-1)\n\n if action_mask is not None:\n num_actions = action_mask.size(1)\n prompt_mask = attention_mask[:, :-num_actions]\n values = values[:, :-num_actions]\n value = masked_mean(values, prompt_mask, dim=1)\n return value\n\n values = values[:, :-1]\n value = values.mean(dim=1).squeeze(1)\n return value\n", "path": "applications/ChatGPT/chatgpt/models/base/critic.py"}]}
| 1,120 | 254 |
gh_patches_debug_10448 | rasdani/github-patches | git_diff | biolab__orange3-text-176 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Chardet fails on Slovenian characters
Preprocess Text fails with Slovenian stopword list. Seems like a chardet issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/preprocess/filter.py`
Content:
```
1 import os
2
3 import re
4 from gensim import corpora
5 from nltk.corpus import stopwords
6
7 __all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter', 'RegexpFilter', 'FrequencyFilter']
8
9
10 class BaseTokenFilter:
11 name = NotImplemented
12
13 def __call__(self, corpus):
14 if len(corpus) == 0:
15 return corpus
16 if isinstance(corpus[0], str):
17 return self.filter(corpus)
18 return [self.filter(tokens) for tokens in corpus]
19
20 def filter(self, tokens):
21 return list(filter(self.check, tokens))
22
23 def check(self, token):
24 raise NotImplementedError
25
26 def __str__(self):
27 return self.name
28
29 def set_up(self):
30 """ A method for setting filters up before every __call__. """
31 pass
32
33 def tear_down(self):
34 """ A method for cleaning up after every __call__. """
35 pass
36
37
38 class WordListMixin:
39 def __init__(self, word_list=None):
40 self.file_path = None
41 self.word_list = word_list or []
42
43 def from_file(self, path):
44 self.file_path = path
45 if not path:
46 self.word_list = []
47 else:
48 with open(path) as f:
49 self.word_list = set([line.strip() for line in f])
50
51
52 class StopwordsFilter(BaseTokenFilter, WordListMixin):
53 """ Remove tokens present in NLTK's language specific lists or a file. """
54 name = 'Stopwords'
55
56 supported_languages = [file.capitalize() for file in os.listdir(stopwords._get_root())
57 if file.islower()]
58
59 def __init__(self, language='English', word_list=None):
60 WordListMixin.__init__(self, word_list)
61 super().__init__()
62 self.language = language
63
64 @property
65 def language(self):
66 return self._language
67
68 @language.setter
69 def language(self, value):
70 self._language = value
71 if not self._language:
72 self.stopwords = []
73 else:
74 self.stopwords = set(stopwords.words(self.language.lower()))
75
76 def __str__(self):
77 config = ''
78 config += 'Language: {}, '.format(self.language.capitalize()) if self.language else ''
79 config += 'File: {}, '.format(self.file_path) if self.file_path else ''
80 return '{} ({})'.format(self.name, config.strip(', '))
81
82 def check(self, token):
83 return token not in self.stopwords and token not in self.word_list
84
85
86 class LexiconFilter(BaseTokenFilter, WordListMixin):
87 """ Keep only tokens present in a file. """
88 name = 'Lexicon'
89
90 def __init__(self, lexicon=None):
91 WordListMixin.__init__(self, word_list=lexicon)
92
93 @property
94 def lexicon(self):
95 return self.word_list
96
97 @lexicon.setter
98 def lexicon(self, value):
99 self.word_list = set(value)
100
101 def check(self, token):
102 return not self.lexicon or token in self.lexicon
103
104 def __str__(self):
105 return '{} ({})'.format(self.name, 'File: {}'.format(self.file_path))
106
107
108 class RegexpFilter(BaseTokenFilter):
109 """ Remove tokens matching this regular expressions. """
110 name = 'Regexp'
111
112 def __init__(self, pattern=r'\.|,|:|!|\?'):
113 self._pattern = pattern
114 # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able
115 # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).
116 # To bypass the problem regex is compiled before every __call__ and discarded right after.
117 self.regex = None
118 self.set_up()
119
120 @property
121 def pattern(self):
122 return self._pattern
123
124 @pattern.setter
125 def pattern(self, value):
126 self._pattern = value
127 self.set_up()
128
129 @staticmethod
130 def validate_regexp(regexp):
131 try:
132 re.compile(regexp)
133 return True
134 except re.error:
135 return False
136
137 def check(self, token):
138 return not self.regex.match(token)
139
140 def __str__(self):
141 return '{} ({})'.format(self.name, self.pattern)
142
143 def set_up(self):
144 """ Compile Regex before the __call__. """
145 self.regex = re.compile(self.pattern)
146
147 def tear_down(self):
148 """ Delete Regex after every __call__. """
149 self.regex = None
150
151
152 class FrequencyFilter(LexiconFilter):
153 """Remove tokens with document frequency outside this range;
154 use either absolute or relative frequency. """
155 name = 'Document frequency'
156
157 def __init__(self, min_df=0., max_df=1., keep_n=None):
158 super().__init__()
159 self._corpus_len = 0
160 self.keep_n = keep_n
161 self._max_df = max_df
162 self._min_df = min_df
163
164 def fit_filter(self, corpus):
165 self._corpus_len = len(corpus)
166 tokens = getattr(corpus, 'tokens', corpus)
167 dictionary = corpora.Dictionary(tokens)
168 dictionary.filter_extremes(self.min_df, self.max_df, self.keep_n)
169 self.lexicon = dictionary.token2id.keys()
170 return self(tokens), dictionary
171
172 @property
173 def max_df(self):
174 if isinstance(self._max_df, int):
175 return self._max_df / self._corpus_len if self._corpus_len else 1.
176 else:
177 return self._max_df
178
179 @max_df.setter
180 def max_df(self, value):
181 self._max_df = value
182
183 @property
184 def min_df(self):
185 if isinstance(self._min_df, float):
186 return int(self._corpus_len * self._min_df) or 1
187 else:
188 return self._min_df
189
190 @min_df.setter
191 def min_df(self, value):
192 self._min_df = value
193
194 def __str__(self):
195 keep = ', keep {}'.format(self.keep_n) if self.keep_n else ''
196 return "{} (range [{}, {}]{})".format(self.name, self._min_df,
197 self._max_df, keep)
198
199 def check(self, token):
200 return token in self.lexicon
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/preprocess/filter.py b/orangecontrib/text/preprocess/filter.py
--- a/orangecontrib/text/preprocess/filter.py
+++ b/orangecontrib/text/preprocess/filter.py
@@ -1,6 +1,7 @@
import os
-
import re
+
+from Orange.data.io import detect_encoding
from gensim import corpora
from nltk.corpus import stopwords
@@ -45,7 +46,8 @@
if not path:
self.word_list = []
else:
- with open(path) as f:
+ enc = detect_encoding(path)
+ with open(path, encoding=enc) as f:
self.word_list = set([line.strip() for line in f])
|
{"golden_diff": "diff --git a/orangecontrib/text/preprocess/filter.py b/orangecontrib/text/preprocess/filter.py\n--- a/orangecontrib/text/preprocess/filter.py\n+++ b/orangecontrib/text/preprocess/filter.py\n@@ -1,6 +1,7 @@\n import os\n-\n import re\n+\n+from Orange.data.io import detect_encoding\n from gensim import corpora\n from nltk.corpus import stopwords\n \n@@ -45,7 +46,8 @@\n if not path:\n self.word_list = []\n else:\n- with open(path) as f:\n+ enc = detect_encoding(path)\n+ with open(path, encoding=enc) as f:\n self.word_list = set([line.strip() for line in f])\n", "issue": "Chardet fails on Slovenian characters\nPreprocess Text fails with Slovenian stopword list. Seems like a chardet issue.\n", "before_files": [{"content": "import os\n\nimport re\nfrom gensim import corpora\nfrom nltk.corpus import stopwords\n\n__all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter', 'RegexpFilter', 'FrequencyFilter']\n\n\nclass BaseTokenFilter:\n name = NotImplemented\n\n def __call__(self, corpus):\n if len(corpus) == 0:\n return corpus\n if isinstance(corpus[0], str):\n return self.filter(corpus)\n return [self.filter(tokens) for tokens in corpus]\n\n def filter(self, tokens):\n return list(filter(self.check, tokens))\n\n def check(self, token):\n raise NotImplementedError\n\n def __str__(self):\n return self.name\n\n def set_up(self):\n \"\"\" A method for setting filters up before every __call__. \"\"\"\n pass\n\n def tear_down(self):\n \"\"\" A method for cleaning up after every __call__. \"\"\"\n pass\n\n\nclass WordListMixin:\n def __init__(self, word_list=None):\n self.file_path = None\n self.word_list = word_list or []\n\n def from_file(self, path):\n self.file_path = path\n if not path:\n self.word_list = []\n else:\n with open(path) as f:\n self.word_list = set([line.strip() for line in f])\n\n\nclass StopwordsFilter(BaseTokenFilter, WordListMixin):\n \"\"\" Remove tokens present in NLTK's language specific lists or a file. \"\"\"\n name = 'Stopwords'\n\n supported_languages = [file.capitalize() for file in os.listdir(stopwords._get_root())\n if file.islower()]\n\n def __init__(self, language='English', word_list=None):\n WordListMixin.__init__(self, word_list)\n super().__init__()\n self.language = language\n\n @property\n def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n if not self._language:\n self.stopwords = []\n else:\n self.stopwords = set(stopwords.words(self.language.lower()))\n\n def __str__(self):\n config = ''\n config += 'Language: {}, '.format(self.language.capitalize()) if self.language else ''\n config += 'File: {}, '.format(self.file_path) if self.file_path else ''\n return '{} ({})'.format(self.name, config.strip(', '))\n\n def check(self, token):\n return token not in self.stopwords and token not in self.word_list\n\n\nclass LexiconFilter(BaseTokenFilter, WordListMixin):\n \"\"\" Keep only tokens present in a file. \"\"\"\n name = 'Lexicon'\n\n def __init__(self, lexicon=None):\n WordListMixin.__init__(self, word_list=lexicon)\n\n @property\n def lexicon(self):\n return self.word_list\n\n @lexicon.setter\n def lexicon(self, value):\n self.word_list = set(value)\n\n def check(self, token):\n return not self.lexicon or token in self.lexicon\n\n def __str__(self):\n return '{} ({})'.format(self.name, 'File: {}'.format(self.file_path))\n\n\nclass RegexpFilter(BaseTokenFilter):\n \"\"\" Remove tokens matching this regular expressions. 
\"\"\"\n name = 'Regexp'\n\n def __init__(self, pattern=r'\\.|,|:|!|\\?'):\n self._pattern = pattern\n # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able\n # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).\n # To bypass the problem regex is compiled before every __call__ and discarded right after.\n self.regex = None\n self.set_up()\n\n @property\n def pattern(self):\n return self._pattern\n\n @pattern.setter\n def pattern(self, value):\n self._pattern = value\n self.set_up()\n\n @staticmethod\n def validate_regexp(regexp):\n try:\n re.compile(regexp)\n return True\n except re.error:\n return False\n\n def check(self, token):\n return not self.regex.match(token)\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.pattern)\n\n def set_up(self):\n \"\"\" Compile Regex before the __call__. \"\"\"\n self.regex = re.compile(self.pattern)\n\n def tear_down(self):\n \"\"\" Delete Regex after every __call__. \"\"\"\n self.regex = None\n\n\nclass FrequencyFilter(LexiconFilter):\n \"\"\"Remove tokens with document frequency outside this range;\n use either absolute or relative frequency. \"\"\"\n name = 'Document frequency'\n\n def __init__(self, min_df=0., max_df=1., keep_n=None):\n super().__init__()\n self._corpus_len = 0\n self.keep_n = keep_n\n self._max_df = max_df\n self._min_df = min_df\n\n def fit_filter(self, corpus):\n self._corpus_len = len(corpus)\n tokens = getattr(corpus, 'tokens', corpus)\n dictionary = corpora.Dictionary(tokens)\n dictionary.filter_extremes(self.min_df, self.max_df, self.keep_n)\n self.lexicon = dictionary.token2id.keys()\n return self(tokens), dictionary\n\n @property\n def max_df(self):\n if isinstance(self._max_df, int):\n return self._max_df / self._corpus_len if self._corpus_len else 1.\n else:\n return self._max_df\n\n @max_df.setter\n def max_df(self, value):\n self._max_df = value\n\n @property\n def min_df(self):\n if isinstance(self._min_df, float):\n return int(self._corpus_len * self._min_df) or 1\n else:\n return self._min_df\n\n @min_df.setter\n def min_df(self, value):\n self._min_df = value\n\n def __str__(self):\n keep = ', keep {}'.format(self.keep_n) if self.keep_n else ''\n return \"{} (range [{}, {}]{})\".format(self.name, self._min_df,\n self._max_df, keep)\n\n def check(self, token):\n return token in self.lexicon\n", "path": "orangecontrib/text/preprocess/filter.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom Orange.data.io import detect_encoding\nfrom gensim import corpora\nfrom nltk.corpus import stopwords\n\n__all__ = ['BaseTokenFilter', 'StopwordsFilter', 'LexiconFilter', 'RegexpFilter', 'FrequencyFilter']\n\n\nclass BaseTokenFilter:\n name = NotImplemented\n\n def __call__(self, corpus):\n if len(corpus) == 0:\n return corpus\n if isinstance(corpus[0], str):\n return self.filter(corpus)\n return [self.filter(tokens) for tokens in corpus]\n\n def filter(self, tokens):\n return list(filter(self.check, tokens))\n\n def check(self, token):\n raise NotImplementedError\n\n def __str__(self):\n return self.name\n\n def set_up(self):\n \"\"\" A method for setting filters up before every __call__. \"\"\"\n pass\n\n def tear_down(self):\n \"\"\" A method for cleaning up after every __call__. 
\"\"\"\n pass\n\n\nclass WordListMixin:\n def __init__(self, word_list=None):\n self.file_path = None\n self.word_list = word_list or []\n\n def from_file(self, path):\n self.file_path = path\n if not path:\n self.word_list = []\n else:\n enc = detect_encoding(path)\n with open(path, encoding=enc) as f:\n self.word_list = set([line.strip() for line in f])\n\n\nclass StopwordsFilter(BaseTokenFilter, WordListMixin):\n \"\"\" Remove tokens present in NLTK's language specific lists or a file. \"\"\"\n name = 'Stopwords'\n\n supported_languages = [file.capitalize() for file in os.listdir(stopwords._get_root())\n if file.islower()]\n\n def __init__(self, language='English', word_list=None):\n WordListMixin.__init__(self, word_list)\n super().__init__()\n self.language = language\n\n @property\n def language(self):\n return self._language\n\n @language.setter\n def language(self, value):\n self._language = value\n if not self._language:\n self.stopwords = []\n else:\n self.stopwords = set(stopwords.words(self.language.lower()))\n\n def __str__(self):\n config = ''\n config += 'Language: {}, '.format(self.language.capitalize()) if self.language else ''\n config += 'File: {}, '.format(self.file_path) if self.file_path else ''\n return '{} ({})'.format(self.name, config.strip(', '))\n\n def check(self, token):\n return token not in self.stopwords and token not in self.word_list\n\n\nclass LexiconFilter(BaseTokenFilter, WordListMixin):\n \"\"\" Keep only tokens present in a file. \"\"\"\n name = 'Lexicon'\n\n def __init__(self, lexicon=None):\n WordListMixin.__init__(self, word_list=lexicon)\n\n @property\n def lexicon(self):\n return self.word_list\n\n @lexicon.setter\n def lexicon(self, value):\n self.word_list = set(value)\n\n def check(self, token):\n return not self.lexicon or token in self.lexicon\n\n def __str__(self):\n return '{} ({})'.format(self.name, 'File: {}'.format(self.file_path))\n\n\nclass RegexpFilter(BaseTokenFilter):\n \"\"\" Remove tokens matching this regular expressions. \"\"\"\n name = 'Regexp'\n\n def __init__(self, pattern=r'\\.|,|:|!|\\?'):\n self._pattern = pattern\n # Compiled Regexes are NOT deepcopy-able and hence to make Corpus deepcopy-able\n # we cannot store then (due to Corpus also storing used_preprocessor for BoW compute values).\n # To bypass the problem regex is compiled before every __call__ and discarded right after.\n self.regex = None\n self.set_up()\n\n @property\n def pattern(self):\n return self._pattern\n\n @pattern.setter\n def pattern(self, value):\n self._pattern = value\n self.set_up()\n\n @staticmethod\n def validate_regexp(regexp):\n try:\n re.compile(regexp)\n return True\n except re.error:\n return False\n\n def check(self, token):\n return not self.regex.match(token)\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.pattern)\n\n def set_up(self):\n \"\"\" Compile Regex before the __call__. \"\"\"\n self.regex = re.compile(self.pattern)\n\n def tear_down(self):\n \"\"\" Delete Regex after every __call__. \"\"\"\n self.regex = None\n\n\nclass FrequencyFilter(LexiconFilter):\n \"\"\"Remove tokens with document frequency outside this range;\n use either absolute or relative frequency. 
\"\"\"\n name = 'Document frequency'\n\n def __init__(self, min_df=0., max_df=1., keep_n=None):\n super().__init__()\n self._corpus_len = 0\n self.keep_n = keep_n\n self._max_df = max_df\n self._min_df = min_df\n\n def fit_filter(self, corpus):\n self._corpus_len = len(corpus)\n tokens = getattr(corpus, 'tokens', corpus)\n dictionary = corpora.Dictionary(tokens)\n dictionary.filter_extremes(self.min_df, self.max_df, self.keep_n)\n self.lexicon = dictionary.token2id.keys()\n return self(tokens), dictionary\n\n @property\n def max_df(self):\n if isinstance(self._max_df, int):\n return self._max_df / self._corpus_len if self._corpus_len else 1.\n else:\n return self._max_df\n\n @max_df.setter\n def max_df(self, value):\n self._max_df = value\n\n @property\n def min_df(self):\n if isinstance(self._min_df, float):\n return int(self._corpus_len * self._min_df) or 1\n else:\n return self._min_df\n\n @min_df.setter\n def min_df(self, value):\n self._min_df = value\n\n def __str__(self):\n keep = ', keep {}'.format(self.keep_n) if self.keep_n else ''\n return \"{} (range [{}, {}]{})\".format(self.name, self._min_df,\n self._max_df, keep)\n\n def check(self, token):\n return token in self.lexicon\n", "path": "orangecontrib/text/preprocess/filter.py"}]}
| 2,156 | 156 |
gh_patches_debug_36284
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2966
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider albert_heijn is broken
During the global build at 2021-06-02-14-42-40, spider **albert_heijn** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/albert_heijn.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/albert_heijn.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5
6 class AlbertHeijnSpider(scrapy.Spider):
7 name = 'albert_heijn'
8 item_attributes = {'brand': "Albert Heijn"}
9 allowed_domains = ['www.ah.nl']
10
11 def start_requests(self):
12 url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'
13 yield scrapy.Request(url, callback=self.parse)
14
15 def parse(self, response):
16 stores = json.loads(response.body_as_unicode())
17 for store in stores['stores']:
18 try:
19 phone_number = store['phoneNumber']
20 except:
21 phone_number = ""
22 yield GeojsonPointItem(
23 lat=store['lat'],
24 lon=store['lng'],
25 addr_full="%s %s" % (store['street'], store["housenr"]),
26 city=store['city'],
27 phone=phone_number,
28 state="",
29 postcode=store['zip'],
30 ref=store['no'],
31 country="Netherlands",
32 website="https://www.ah.nl/winkel/albert-heijn/%s/%s/%s" % (store['city'], store['street'], store['no'])
33 )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/albert_heijn.py b/locations/spiders/albert_heijn.py
--- a/locations/spiders/albert_heijn.py
+++ b/locations/spiders/albert_heijn.py
@@ -1,33 +1,53 @@
# -*- coding: utf-8 -*-
+import json
+import re
+
import scrapy
+
+from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
-import json
-class AlbertHeijnSpider(scrapy.Spider):
- name = 'albert_heijn'
- item_attributes = {'brand': "Albert Heijn"}
- allowed_domains = ['www.ah.nl']
- def start_requests(self):
- url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'
- yield scrapy.Request(url, callback=self.parse)
+class AlbertHeijnSpider(scrapy.Spider):
+ name = "albert_heijn"
+ item_attributes = {"brand": "Albert Heijn", "brand_wikidata": "Q1653985"}
+ allowed_domains = ["www.ah.nl", "www.ah.be"]
+ start_urls = (
+ "https://www.ah.nl/sitemaps/entities/stores/stores.xml",
+ "https://www.ah.be/sitemaps/entities/stores/stores.xml",
+ )
def parse(self, response):
- stores = json.loads(response.body_as_unicode())
- for store in stores['stores']:
- try:
- phone_number = store['phoneNumber']
- except:
- phone_number = ""
- yield GeojsonPointItem(
- lat=store['lat'],
- lon=store['lng'],
- addr_full="%s %s" % (store['street'], store["housenr"]),
- city=store['city'],
- phone=phone_number,
- state="",
- postcode=store['zip'],
- ref=store['no'],
- country="Netherlands",
- website="https://www.ah.nl/winkel/albert-heijn/%s/%s/%s" % (store['city'], store['street'], store['no'])
- )
+ response.selector.remove_namespaces()
+ for url in response.xpath("//loc/text()").extract():
+ if re.search("/winkel/albert-heijn/", url):
+ yield scrapy.Request(url, callback=self.parse_store)
+
+ def parse_store(self, response):
+ for ldjson in response.xpath(
+ '//script[@type="application/ld+json"]/text()'
+ ).extract():
+ data = json.loads(ldjson)
+ if data["@type"] != "GroceryStore":
+ continue
+
+ opening_hours = OpeningHours()
+ for spec in data["openingHoursSpecification"]:
+ opening_hours.add_range(
+ spec["dayOfWeek"][:2], spec["opens"], spec["closes"]
+ )
+
+ properties = {
+ "ref": response.url,
+ "website": response.url,
+ "name": data["name"],
+ "phone": data["telephone"],
+ "lat": data["geo"]["latitude"],
+ "lon": data["geo"]["longitude"],
+ "addr_full": data["address"]["streetAddress"],
+ "city": data["address"]["addressLocality"],
+ "postcode": data["address"]["postalCode"],
+ "country": data["address"]["addressCountry"],
+ "opening_hours": opening_hours.as_opening_hours(),
+ }
+ yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/albert_heijn.py b/locations/spiders/albert_heijn.py\n--- a/locations/spiders/albert_heijn.py\n+++ b/locations/spiders/albert_heijn.py\n@@ -1,33 +1,53 @@\n # -*- coding: utf-8 -*-\n+import json\n+import re\n+\n import scrapy\n+\n+from locations.hours import OpeningHours\n from locations.items import GeojsonPointItem\n-import json\n \n-class AlbertHeijnSpider(scrapy.Spider):\n- name = 'albert_heijn'\n- item_attributes = {'brand': \"Albert Heijn\"}\n- allowed_domains = ['www.ah.nl']\n \n- def start_requests(self):\n- url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'\n- yield scrapy.Request(url, callback=self.parse)\n+class AlbertHeijnSpider(scrapy.Spider):\n+ name = \"albert_heijn\"\n+ item_attributes = {\"brand\": \"Albert Heijn\", \"brand_wikidata\": \"Q1653985\"}\n+ allowed_domains = [\"www.ah.nl\", \"www.ah.be\"]\n+ start_urls = (\n+ \"https://www.ah.nl/sitemaps/entities/stores/stores.xml\",\n+ \"https://www.ah.be/sitemaps/entities/stores/stores.xml\",\n+ )\n \n def parse(self, response):\n- stores = json.loads(response.body_as_unicode())\n- for store in stores['stores']:\n- try:\n- phone_number = store['phoneNumber']\n- except:\n- phone_number = \"\"\n- yield GeojsonPointItem(\n- lat=store['lat'],\n- lon=store['lng'],\n- addr_full=\"%s %s\" % (store['street'], store[\"housenr\"]),\n- city=store['city'],\n- phone=phone_number,\n- state=\"\",\n- postcode=store['zip'],\n- ref=store['no'],\n- country=\"Netherlands\",\n- website=\"https://www.ah.nl/winkel/albert-heijn/%s/%s/%s\" % (store['city'], store['street'], store['no'])\n- )\n+ response.selector.remove_namespaces()\n+ for url in response.xpath(\"//loc/text()\").extract():\n+ if re.search(\"/winkel/albert-heijn/\", url):\n+ yield scrapy.Request(url, callback=self.parse_store)\n+\n+ def parse_store(self, response):\n+ for ldjson in response.xpath(\n+ '//script[@type=\"application/ld+json\"]/text()'\n+ ).extract():\n+ data = json.loads(ldjson)\n+ if data[\"@type\"] != \"GroceryStore\":\n+ continue\n+\n+ opening_hours = OpeningHours()\n+ for spec in data[\"openingHoursSpecification\"]:\n+ opening_hours.add_range(\n+ spec[\"dayOfWeek\"][:2], spec[\"opens\"], spec[\"closes\"]\n+ )\n+\n+ properties = {\n+ \"ref\": response.url,\n+ \"website\": response.url,\n+ \"name\": data[\"name\"],\n+ \"phone\": data[\"telephone\"],\n+ \"lat\": data[\"geo\"][\"latitude\"],\n+ \"lon\": data[\"geo\"][\"longitude\"],\n+ \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"city\": data[\"address\"][\"addressLocality\"],\n+ \"postcode\": data[\"address\"][\"postalCode\"],\n+ \"country\": data[\"address\"][\"addressCountry\"],\n+ \"opening_hours\": opening_hours.as_opening_hours(),\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider albert_heijn is broken\nDuring the global build at 2021-06-02-14-42-40, spider **albert_heijn** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/albert_heijn.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AlbertHeijnSpider(scrapy.Spider):\n name = 'albert_heijn'\n item_attributes = {'brand': \"Albert Heijn\"}\n allowed_domains = ['www.ah.nl']\n\n 
def start_requests(self):\n url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n for store in stores['stores']:\n try:\n phone_number = store['phoneNumber']\n except:\n phone_number = \"\"\n yield GeojsonPointItem(\n lat=store['lat'],\n lon=store['lng'],\n addr_full=\"%s %s\" % (store['street'], store[\"housenr\"]),\n city=store['city'],\n phone=phone_number,\n state=\"\",\n postcode=store['zip'],\n ref=store['no'],\n country=\"Netherlands\",\n website=\"https://www.ah.nl/winkel/albert-heijn/%s/%s/%s\" % (store['city'], store['street'], store['no'])\n )\n", "path": "locations/spiders/albert_heijn.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass AlbertHeijnSpider(scrapy.Spider):\n name = \"albert_heijn\"\n item_attributes = {\"brand\": \"Albert Heijn\", \"brand_wikidata\": \"Q1653985\"}\n allowed_domains = [\"www.ah.nl\", \"www.ah.be\"]\n start_urls = (\n \"https://www.ah.nl/sitemaps/entities/stores/stores.xml\",\n \"https://www.ah.be/sitemaps/entities/stores/stores.xml\",\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for url in response.xpath(\"//loc/text()\").extract():\n if re.search(\"/winkel/albert-heijn/\", url):\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n for ldjson in response.xpath(\n '//script[@type=\"application/ld+json\"]/text()'\n ).extract():\n data = json.loads(ldjson)\n if data[\"@type\"] != \"GroceryStore\":\n continue\n\n opening_hours = OpeningHours()\n for spec in data[\"openingHoursSpecification\"]:\n opening_hours.add_range(\n spec[\"dayOfWeek\"][:2], spec[\"opens\"], spec[\"closes\"]\n )\n\n properties = {\n \"ref\": response.url,\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"country\": data[\"address\"][\"addressCountry\"],\n \"opening_hours\": opening_hours.as_opening_hours(),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/albert_heijn.py"}]}
| 776 | 782 |
gh_patches_debug_25605
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-1878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tesla is broken
During the global build at 2021-05-26-14-42-23, spider **tesla** failed with **486 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tesla.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tesla.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import re
3 import scrapy
4 import urllib.parse
5 from locations.items import GeojsonPointItem
6
7
8 class TeslaSpider(scrapy.Spider):
9 name = "tesla"
10 item_attributes = { 'brand': "Tesla" }
11 allowed_domains = ['www.tesla.com']
12 start_urls = [
13 'https://www.tesla.com/findus/list',
14 ]
15 download_delay = 0.5
16 custom_settings = {
17 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
18 }
19
20 def parse(self, response):
21 # Only scrape stores and service centers
22 country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services")]/@href').extract()
23 for country_url in country_urls:
24 yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)
25
26 def parse_store_list(self, response):
27 store_urls = response.xpath('//a[@class="fn org url"]/@href').extract()
28 for store_url in store_urls:
29 yield scrapy.Request(response.urljoin(store_url), callback=self.parse_store)
30
31 def parse_store(self, response):
32 # Skip if "Coming Soon" - no content to capture yet
33 if response.xpath('//span[@class="coming-soon"]/text()').extract_first() == "Coming Soon":
34 pass
35 else:
36 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
37
38 # city, state, and zip do not have separate classes - contained together in locality class as text
39 name = response.xpath('normalize-space(//header/h1/text())').extract_first()
40 common_name = response.xpath('normalize-space(//span[@class="common-name"]//text())').extract_first()
41 street_address = response.xpath('normalize-space(//span[@class="street-address"]//text())').extract_first()
42 city_state_zip = response.xpath('normalize-space(//span[@class="locality"]//text())').extract_first()
43
44 if common_name and street_address and city_state_zip:
45 addr_full = common_name + ' ' + street_address + ', ' + city_state_zip
46 elif street_address and not city_state_zip:
47 addr_full = street_address
48 elif city_state_zip and not street_address:
49 addr_full = city_state_zip
50 elif street_address and city_state_zip:
51 addr_full = street_address + ', ' + city_state_zip
52
53 country_url = response.xpath('//header[@class="findus-list-header"]/a/@href').extract_first()
54 country = urllib.parse.unquote_plus(re.search(r'.+/(.+?)/?(?:\.html|$)', country_url).group(1))
55 phone = response.xpath('normalize-space(//span[@class="tel"]/span[2]/text())').extract_first()
56 location_type = re.search(r".+/(.+?)/(.+?)/?(?:\.html|$)", response.url).group(1)
57
58 # map link varies across store pages
59 if response.xpath('normalize-space(//a[contains(@href,"maps.google")]/@href)').extract_first():
60 map_link = response.xpath('normalize-space(//a[contains(@href,"maps.google")]/@href)').extract_first()
61 else:
62 map_link = response.xpath('normalize-space(//img[contains(@src,"maps.google")]/@src)').extract_first()
63
64 # extract coordinates from map link
65 if re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link):
66 lat = re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link).group(1)
67 lon = re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link).group(2)
68 elif re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link):
69 lat = re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link).group(1)
70 lon = re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link).group(2)
71 else:
72 lat = None
73 lon = None
74
75 properties = {
76 'ref': ref,
77 'name': name,
78 'addr_full': addr_full,
79 'country': country,
80 'phone': phone,
81 'website': response.url,
82 'lat': lat,
83 'lon': lon,
84 'extras':
85 {
86 'location_type': location_type # Is this a service center or store/gallery
87 }
88 }
89
90 yield GeojsonPointItem(**properties)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/tesla.py b/locations/spiders/tesla.py
--- a/locations/spiders/tesla.py
+++ b/locations/spiders/tesla.py
@@ -19,7 +19,7 @@
def parse(self, response):
# Only scrape stores and service centers
- country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services")]/@href').extract()
+ country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services") or contains(@href,"superchargers")]/@href').extract()
for country_url in country_urls:
yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)
@@ -41,6 +41,7 @@
street_address = response.xpath('normalize-space(//span[@class="street-address"]//text())').extract_first()
city_state_zip = response.xpath('normalize-space(//span[@class="locality"]//text())').extract_first()
+ addr_full = ""
if common_name and street_address and city_state_zip:
addr_full = common_name + ' ' + street_address + ', ' + city_state_zip
elif street_address and not city_state_zip:
|
{"golden_diff": "diff --git a/locations/spiders/tesla.py b/locations/spiders/tesla.py\n--- a/locations/spiders/tesla.py\n+++ b/locations/spiders/tesla.py\n@@ -19,7 +19,7 @@\n \n def parse(self, response):\n # Only scrape stores and service centers\n- country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\")]/@href').extract()\n+ country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\") or contains(@href,\"superchargers\")]/@href').extract()\n for country_url in country_urls:\n yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)\n \n@@ -41,6 +41,7 @@\n street_address = response.xpath('normalize-space(//span[@class=\"street-address\"]//text())').extract_first()\n city_state_zip = response.xpath('normalize-space(//span[@class=\"locality\"]//text())').extract_first()\n \n+ addr_full = \"\"\n if common_name and street_address and city_state_zip:\n addr_full = common_name + ' ' + street_address + ', ' + city_state_zip\n elif street_address and not city_state_zip:\n", "issue": "Spider tesla is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tesla** failed with **486 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tesla.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nimport urllib.parse\nfrom locations.items import GeojsonPointItem\n\n\nclass TeslaSpider(scrapy.Spider):\n name = \"tesla\"\n item_attributes = { 'brand': \"Tesla\" }\n allowed_domains = ['www.tesla.com']\n start_urls = [\n 'https://www.tesla.com/findus/list',\n ]\n download_delay = 0.5\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse(self, response):\n # Only scrape stores and service centers\n country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\")]/@href').extract()\n for country_url in country_urls:\n yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)\n\n def parse_store_list(self, response):\n store_urls = response.xpath('//a[@class=\"fn org url\"]/@href').extract()\n for store_url in store_urls:\n yield scrapy.Request(response.urljoin(store_url), callback=self.parse_store)\n\n def parse_store(self, response):\n # Skip if \"Coming Soon\" - no content to capture yet\n if response.xpath('//span[@class=\"coming-soon\"]/text()').extract_first() == \"Coming Soon\":\n pass\n else:\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # city, state, and zip do not have separate classes - contained together in locality class as text\n name = response.xpath('normalize-space(//header/h1/text())').extract_first()\n common_name = response.xpath('normalize-space(//span[@class=\"common-name\"]//text())').extract_first()\n street_address = response.xpath('normalize-space(//span[@class=\"street-address\"]//text())').extract_first()\n city_state_zip = response.xpath('normalize-space(//span[@class=\"locality\"]//text())').extract_first()\n\n if common_name and street_address and city_state_zip:\n addr_full = common_name + ' ' + street_address + ', ' + city_state_zip\n elif street_address and not 
city_state_zip:\n addr_full = street_address\n elif city_state_zip and not street_address:\n addr_full = city_state_zip\n elif street_address and city_state_zip:\n addr_full = street_address + ', ' + city_state_zip\n\n country_url = response.xpath('//header[@class=\"findus-list-header\"]/a/@href').extract_first()\n country = urllib.parse.unquote_plus(re.search(r'.+/(.+?)/?(?:\\.html|$)', country_url).group(1))\n phone = response.xpath('normalize-space(//span[@class=\"tel\"]/span[2]/text())').extract_first()\n location_type = re.search(r\".+/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n # map link varies across store pages\n if response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first():\n map_link = response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first()\n else:\n map_link = response.xpath('normalize-space(//img[contains(@src,\"maps.google\")]/@src)').extract_first()\n\n # extract coordinates from map link\n if re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n elif re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n else:\n lat = None\n lon = None\n\n properties = {\n 'ref': ref,\n 'name': name,\n 'addr_full': addr_full,\n 'country': country,\n 'phone': phone,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n 'extras':\n {\n 'location_type': location_type # Is this a service center or store/gallery\n }\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tesla.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nimport urllib.parse\nfrom locations.items import GeojsonPointItem\n\n\nclass TeslaSpider(scrapy.Spider):\n name = \"tesla\"\n item_attributes = { 'brand': \"Tesla\" }\n allowed_domains = ['www.tesla.com']\n start_urls = [\n 'https://www.tesla.com/findus/list',\n ]\n download_delay = 0.5\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse(self, response):\n # Only scrape stores and service centers\n country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\") or contains(@href,\"superchargers\")]/@href').extract()\n for country_url in country_urls:\n yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)\n\n def parse_store_list(self, response):\n store_urls = response.xpath('//a[@class=\"fn org url\"]/@href').extract()\n for store_url in store_urls:\n yield scrapy.Request(response.urljoin(store_url), callback=self.parse_store)\n\n def parse_store(self, response):\n # Skip if \"Coming Soon\" - no content to capture yet\n if response.xpath('//span[@class=\"coming-soon\"]/text()').extract_first() == \"Coming Soon\":\n pass\n else:\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # city, state, and zip do not have separate classes - contained together in locality class as text\n name = response.xpath('normalize-space(//header/h1/text())').extract_first()\n common_name = response.xpath('normalize-space(//span[@class=\"common-name\"]//text())').extract_first()\n street_address = 
response.xpath('normalize-space(//span[@class=\"street-address\"]//text())').extract_first()\n city_state_zip = response.xpath('normalize-space(//span[@class=\"locality\"]//text())').extract_first()\n\n addr_full = \"\"\n if common_name and street_address and city_state_zip:\n addr_full = common_name + ' ' + street_address + ', ' + city_state_zip\n elif street_address and not city_state_zip:\n addr_full = street_address\n elif city_state_zip and not street_address:\n addr_full = city_state_zip\n elif street_address and city_state_zip:\n addr_full = street_address + ', ' + city_state_zip\n\n country_url = response.xpath('//header[@class=\"findus-list-header\"]/a/@href').extract_first()\n country = urllib.parse.unquote_plus(re.search(r'.+/(.+?)/?(?:\\.html|$)', country_url).group(1))\n phone = response.xpath('normalize-space(//span[@class=\"tel\"]/span[2]/text())').extract_first()\n location_type = re.search(r\".+/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n # map link varies across store pages\n if response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first():\n map_link = response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first()\n else:\n map_link = response.xpath('normalize-space(//img[contains(@src,\"maps.google\")]/@src)').extract_first()\n\n # extract coordinates from map link\n if re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n elif re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n else:\n lat = None\n lon = None\n\n properties = {\n 'ref': ref,\n 'name': name,\n 'addr_full': addr_full,\n 'country': country,\n 'phone': phone,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n 'extras':\n {\n 'location_type': location_type # Is this a service center or store/gallery\n }\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tesla.py"}]}
| 1,692 | 275 |
gh_patches_debug_1658
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-4628
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.twitcasting: Writes JSON into video files when it shouldn't
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
https://github.com/streamlink/streamlink/pull/4608 introduced a bug of JSON being written to the output file.
- When running streamlink on a channel that is live but only for members, using `-o out.mp4` flag to output to a file, it creates a video file containing just a single JSON line in it:
```
$ cat out.mp4
{"type":"status","code":403,"text":"Access Forbidden"}
```
The expected behavior is that it doesn't create the file in such situation, like it used to behave before https://github.com/streamlink/streamlink/pull/4608 fixes were made.
- It also adds `{"type":"status","code":504,"text":"End of Live"}` at the end of video files when the stream ends:
```
$ xxd -s -128 -c 16 out.ts
24b5bee9: 5c75 7cc6 7e38 e099 55d9 6257 59d8 eb6e \u|.~8..U.bWY..n
24b5bef9: b7aa 49bb ef3a dd18 7767 8c77 7dc6 6ade ..I..:..wg.w}.j.
24b5bf09: 6d54 2175 2acf 0926 400f 0449 2bc6 a816 mT!u*..&@..I+...
24b5bf19: 3523 72e9 db4d 6c5a 5aba ec75 3c0a ad72 5#r..MlZZ..u<..r
24b5bf29: 2258 0b2f ebc2 b50a 7ed3 bbbd 8d30 c77b "X./....~....0.{
24b5bf39: 2274 7970 6522 3a22 7374 6174 7573 222c "type":"status",
24b5bf49: 2263 6f64 6522 3a35 3034 2c22 7465 7874 "code":504,"text
24b5bf59: 223a 2245 6e64 206f 6620 4c69 7665 227d ":"End of Live"}
```

- Perhaps it shouldn't be writing any `response['type'] == 'status'` to the file?
- While at it, maybe there is something else that it's writing to a video file that it shouldn't? As mentioned in https://github.com/streamlink/streamlink/issues/4604#issuecomment-1166177130, Twitcasting also sends `{"type":"event","code":100,"text":""}` sometimes. Would that get written into the video file too? Is that something that should be written into it?
### Debug log
```text
[cli][debug] OS: Linux-5.10.0-14-amd64-x86_64-with-glibc2.31
[cli][debug] Python: 3.9.2
[cli][debug] Streamlink: 4.1.0+37.g2c564dbe
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.0
[cli][debug] lxml: 4.7.1
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.10.1
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.0
[cli][debug] websocket-client: 1.2.3
[cli][debug] Arguments:
[cli][debug] url=https://twitcasting.tv/[REDACTED]
[cli][debug] stream=['best']
[cli][debug] --config=['../config']
[cli][debug] --loglevel=debug
[cli][debug] --output=[REDACTED]
[cli][debug] --retry-streams=1.0
[cli][debug] --retry-max=300
[cli][info] Found matching plugin twitcasting for URL https://twitcasting.tv/[REDACTED]
[plugins.twitcasting][debug] Live stream info: {'movie': {'id': [REDACTED], 'live': True}, 'fmp4': {'host': '202-218-171-197.twitcasting.tv', 'proto': 'wss', 'source': False, 'mobilesource': False}}
[plugins.twitcasting][debug] Real stream url: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base
[cli][info] Available streams: base (worst, best)
[cli][info] Opening stream: base (stream)
[cli][info] Writing output to
[REDACTED]
[cli][debug] Checking file output
[plugin.api.websocket][debug] Connecting to: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base
[cli][debug] Pre-buffering 8192 bytes
[plugin.api.websocket][debug] Connected: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base
[cli][debug] Writing stream to output
[plugin.api.websocket][error] Connection to remote host was lost.
[plugin.api.websocket][debug] Closed: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base
[cli][info] Stream ended
[cli][info] Closing currently open stream...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/twitcasting.py`
Content:
```
1 """
2 $description Global live broadcasting and live broadcast archiving social platform.
3 $url twitcasting.tv
4 $type live
5 """
6
7 import hashlib
8 import logging
9 import re
10
11 from streamlink.buffers import RingBuffer
12 from streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.plugin.api.websocket import WebsocketClient
15 from streamlink.stream.stream import Stream, StreamIO
16 from streamlink.utils.url import update_qsd
17
18
19 log = logging.getLogger(__name__)
20
21
22 @pluginmatcher(re.compile(
23 r"https?://twitcasting\.tv/(?P<channel>[^/]+)"
24 ))
25 class TwitCasting(Plugin):
26 arguments = PluginArguments(
27 PluginArgument(
28 "password",
29 sensitive=True,
30 metavar="PASSWORD",
31 help="Password for private Twitcasting streams."
32 )
33 )
34 _STREAM_INFO_URL = "https://twitcasting.tv/streamserver.php?target={channel}&mode=client"
35 _STREAM_REAL_URL = "{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}"
36
37 _STREAM_INFO_SCHEMA = validate.Schema({
38 validate.optional("movie"): {
39 "id": int,
40 "live": bool
41 },
42 validate.optional("fmp4"): {
43 "host": str,
44 "proto": str,
45 "source": bool,
46 "mobilesource": bool
47 }
48 })
49
50 def __init__(self, url):
51 super().__init__(url)
52 self.channel = self.match.group("channel")
53
54 def _get_streams(self):
55 stream_info = self._get_stream_info()
56 log.debug(f"Live stream info: {stream_info}")
57
58 if not stream_info.get("movie") or not stream_info["movie"]["live"]:
59 raise PluginError("The live stream is offline")
60
61 if not stream_info.get("fmp4"):
62 raise PluginError("Login required")
63
64 # Keys are already validated by schema above
65 proto = stream_info["fmp4"]["proto"]
66 host = stream_info["fmp4"]["host"]
67 movie_id = stream_info["movie"]["id"]
68
69 if stream_info["fmp4"]["source"]:
70 mode = "main" # High quality
71 elif stream_info["fmp4"]["mobilesource"]:
72 mode = "mobilesource" # Medium quality
73 else:
74 mode = "base" # Low quality
75
76 if (proto == '') or (host == '') or (not movie_id):
77 raise PluginError(f"No stream available for user {self.channel}")
78
79 real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)
80
81 password = self.options.get("password")
82 if password is not None:
83 password_hash = hashlib.md5(password.encode()).hexdigest()
84 real_stream_url = update_qsd(real_stream_url, {"word": password_hash})
85
86 log.debug(f"Real stream url: {real_stream_url}")
87
88 return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}
89
90 def _get_stream_info(self):
91 url = self._STREAM_INFO_URL.format(channel=self.channel)
92 res = self.session.http.get(url)
93 return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)
94
95
96 class TwitCastingWsClient(WebsocketClient):
97 def __init__(self, buffer: RingBuffer, *args, **kwargs):
98 self.buffer = buffer
99 super().__init__(*args, **kwargs)
100
101 def on_close(self, *args, **kwargs):
102 super().on_close(*args, **kwargs)
103 self.buffer.close()
104
105 def on_data(self, wsapp, data, data_type, cont):
106 if data_type == self.OPCODE_TEXT:
107 data = bytes(data, "utf-8")
108
109 try:
110 self.buffer.write(data)
111 except Exception as err:
112 log.error(err)
113 self.close()
114
115
116 class TwitCastingReader(StreamIO):
117 def __init__(self, stream: "TwitCastingStream", timeout=None):
118 super().__init__()
119 self.session = stream.session
120 self.stream = stream
121 self.timeout = timeout or self.session.options.get("stream-timeout")
122
123 buffer_size = self.session.get_option("ringbuffer-size")
124 self.buffer = RingBuffer(buffer_size)
125
126 self.wsclient = TwitCastingWsClient(
127 self.buffer,
128 stream.session,
129 stream.url,
130 origin="https://twitcasting.tv/"
131 )
132
133 def open(self):
134 self.wsclient.start()
135
136 def close(self):
137 self.wsclient.close()
138 self.buffer.close()
139
140 def read(self, size):
141 return self.buffer.read(
142 size,
143 block=self.wsclient.is_alive(),
144 timeout=self.timeout
145 )
146
147
148 class TwitCastingStream(Stream):
149 def __init__(self, session, url):
150 super().__init__(session)
151 self.url = url
152
153 def to_url(self):
154 return self.url
155
156 def open(self):
157 reader = TwitCastingReader(self)
158 reader.open()
159 return reader
160
161
162 __plugin__ = TwitCasting
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/twitcasting.py b/src/streamlink/plugins/twitcasting.py
--- a/src/streamlink/plugins/twitcasting.py
+++ b/src/streamlink/plugins/twitcasting.py
@@ -104,7 +104,7 @@
def on_data(self, wsapp, data, data_type, cont):
if data_type == self.OPCODE_TEXT:
- data = bytes(data, "utf-8")
+ return
try:
self.buffer.write(data)
|
{"golden_diff": "diff --git a/src/streamlink/plugins/twitcasting.py b/src/streamlink/plugins/twitcasting.py\n--- a/src/streamlink/plugins/twitcasting.py\n+++ b/src/streamlink/plugins/twitcasting.py\n@@ -104,7 +104,7 @@\n \n def on_data(self, wsapp, data, data_type, cont):\n if data_type == self.OPCODE_TEXT:\n- data = bytes(data, \"utf-8\")\n+ return\n \n try:\n self.buffer.write(data)\n", "issue": "plugins.twitcasting: Writes JSON into video files when it shouldn't\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest build from the master branch\r\n\r\n### Description\r\n\r\nhttps://github.com/streamlink/streamlink/pull/4608 introduced a bug of JSON being written to the output file.\r\n\r\n- When running streamlink on a channel that is live but only for members, using `-o out.mp4` flag to output to a file, it creates a video file containing just a single JSON line in it:\r\n\r\n ```\r\n $ cat out.mp4\r\n {\"type\":\"status\",\"code\":403,\"text\":\"Access Forbidden\"}\r\n ```\r\n\r\n The expected behavior is that it doesn't create the file in such situation, like it used to behave before https://github.com/streamlink/streamlink/pull/4608 fixes were made.\r\n\r\n- It also adds `{\"type\":\"status\",\"code\":504,\"text\":\"End of Live\"}` at the end of video files when the stream ends:\r\n\r\n ```\r\n $ xxd -s -128 -c 16 out.ts\r\n 24b5bee9: 5c75 7cc6 7e38 e099 55d9 6257 59d8 eb6e \\u|.~8..U.bWY..n\r\n 24b5bef9: b7aa 49bb ef3a dd18 7767 8c77 7dc6 6ade ..I..:..wg.w}.j.\r\n 24b5bf09: 6d54 2175 2acf 0926 400f 0449 2bc6 a816 mT!u*..&@..I+...\r\n 24b5bf19: 3523 72e9 db4d 6c5a 5aba ec75 3c0a ad72 5#r..MlZZ..u<..r\r\n 24b5bf29: 2258 0b2f ebc2 b50a 7ed3 bbbd 8d30 c77b \"X./....~....0.{\r\n 24b5bf39: 2274 7970 6522 3a22 7374 6174 7573 222c \"type\":\"status\",\r\n 24b5bf49: 2263 6f64 6522 3a35 3034 2c22 7465 7874 \"code\":504,\"text\r\n 24b5bf59: 223a 2245 6e64 206f 6620 4c69 7665 227d \":\"End of Live\"}\r\n ```\r\n \r\n\r\n\r\n- Perhaps it shouldn't be writing any `response['type'] == 'status'` to the file?\r\n\r\n- While at it, maybe there is something else that it's writing to a video file that it shouldn't? As mentioned in https://github.com/streamlink/streamlink/issues/4604#issuecomment-1166177130, Twitcasting also sends `{\"type\":\"event\",\"code\":100,\"text\":\"\"}` sometimes. Would that get written into the video file too? 
Is that something that should be written into it?\r\n\r\n### Debug log\r\n\r\n```text\r\n[cli][debug] OS: Linux-5.10.0-14-amd64-x86_64-with-glibc2.31\r\n[cli][debug] Python: 3.9.2\r\n[cli][debug] Streamlink: 4.1.0+37.g2c564dbe\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.0\r\n[cli][debug] lxml: 4.7.1\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.10.1\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.0\r\n[cli][debug] websocket-client: 1.2.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://twitcasting.tv/[REDACTED]\r\n[cli][debug] stream=['best']\r\n[cli][debug] --config=['../config']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --output=[REDACTED]\r\n[cli][debug] --retry-streams=1.0\r\n[cli][debug] --retry-max=300\r\n[cli][info] Found matching plugin twitcasting for URL https://twitcasting.tv/[REDACTED]\r\n[plugins.twitcasting][debug] Live stream info: {'movie': {'id': [REDACTED], 'live': True}, 'fmp4': {'host': '202-218-171-197.twitcasting.tv', 'proto': 'wss', 'source': False, 'mobilesource': False}}\r\n[plugins.twitcasting][debug] Real stream url: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base\r\n[cli][info] Available streams: base (worst, best)\r\n[cli][info] Opening stream: base (stream)\r\n[cli][info] Writing output to\r\n[REDACTED]\r\n[cli][debug] Checking file output\r\n[plugin.api.websocket][debug] Connecting to: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[plugin.api.websocket][debug] Connected: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base\r\n[cli][debug] Writing stream to output\r\n[plugin.api.websocket][error] Connection to remote host was lost.\r\n[plugin.api.websocket][debug] Closed: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base\r\n[cli][info] Stream ended\r\n[cli][info] Closing currently open stream...\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Global live broadcasting and live broadcast archiving social platform.\n$url twitcasting.tv\n$type live\n\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom streamlink.buffers import RingBuffer\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.stream import Stream, StreamIO\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://twitcasting\\.tv/(?P<channel>[^/]+)\"\n))\nclass TwitCasting(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"Password for private Twitcasting streams.\"\n )\n )\n _STREAM_INFO_URL = \"https://twitcasting.tv/streamserver.php?target={channel}&mode=client\"\n _STREAM_REAL_URL = \"{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}\"\n\n _STREAM_INFO_SCHEMA = validate.Schema({\n validate.optional(\"movie\"): {\n \"id\": int,\n \"live\": bool\n },\n validate.optional(\"fmp4\"): {\n \"host\": str,\n \"proto\": str,\n \"source\": bool,\n \"mobilesource\": bool\n }\n })\n\n def __init__(self, url):\n super().__init__(url)\n self.channel = self.match.group(\"channel\")\n\n def _get_streams(self):\n stream_info = self._get_stream_info()\n log.debug(f\"Live stream info: {stream_info}\")\n\n 
if not stream_info.get(\"movie\") or not stream_info[\"movie\"][\"live\"]:\n raise PluginError(\"The live stream is offline\")\n\n if not stream_info.get(\"fmp4\"):\n raise PluginError(\"Login required\")\n\n # Keys are already validated by schema above\n proto = stream_info[\"fmp4\"][\"proto\"]\n host = stream_info[\"fmp4\"][\"host\"]\n movie_id = stream_info[\"movie\"][\"id\"]\n\n if stream_info[\"fmp4\"][\"source\"]:\n mode = \"main\" # High quality\n elif stream_info[\"fmp4\"][\"mobilesource\"]:\n mode = \"mobilesource\" # Medium quality\n else:\n mode = \"base\" # Low quality\n\n if (proto == '') or (host == '') or (not movie_id):\n raise PluginError(f\"No stream available for user {self.channel}\")\n\n real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)\n\n password = self.options.get(\"password\")\n if password is not None:\n password_hash = hashlib.md5(password.encode()).hexdigest()\n real_stream_url = update_qsd(real_stream_url, {\"word\": password_hash})\n\n log.debug(f\"Real stream url: {real_stream_url}\")\n\n return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}\n\n def _get_stream_info(self):\n url = self._STREAM_INFO_URL.format(channel=self.channel)\n res = self.session.http.get(url)\n return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)\n\n\nclass TwitCastingWsClient(WebsocketClient):\n def __init__(self, buffer: RingBuffer, *args, **kwargs):\n self.buffer = buffer\n super().__init__(*args, **kwargs)\n\n def on_close(self, *args, **kwargs):\n super().on_close(*args, **kwargs)\n self.buffer.close()\n\n def on_data(self, wsapp, data, data_type, cont):\n if data_type == self.OPCODE_TEXT:\n data = bytes(data, \"utf-8\")\n\n try:\n self.buffer.write(data)\n except Exception as err:\n log.error(err)\n self.close()\n\n\nclass TwitCastingReader(StreamIO):\n def __init__(self, stream: \"TwitCastingStream\", timeout=None):\n super().__init__()\n self.session = stream.session\n self.stream = stream\n self.timeout = timeout or self.session.options.get(\"stream-timeout\")\n\n buffer_size = self.session.get_option(\"ringbuffer-size\")\n self.buffer = RingBuffer(buffer_size)\n\n self.wsclient = TwitCastingWsClient(\n self.buffer,\n stream.session,\n stream.url,\n origin=\"https://twitcasting.tv/\"\n )\n\n def open(self):\n self.wsclient.start()\n\n def close(self):\n self.wsclient.close()\n self.buffer.close()\n\n def read(self, size):\n return self.buffer.read(\n size,\n block=self.wsclient.is_alive(),\n timeout=self.timeout\n )\n\n\nclass TwitCastingStream(Stream):\n def __init__(self, session, url):\n super().__init__(session)\n self.url = url\n\n def to_url(self):\n return self.url\n\n def open(self):\n reader = TwitCastingReader(self)\n reader.open()\n return reader\n\n\n__plugin__ = TwitCasting\n", "path": "src/streamlink/plugins/twitcasting.py"}], "after_files": [{"content": "\"\"\"\n$description Global live broadcasting and live broadcast archiving social platform.\n$url twitcasting.tv\n$type live\n\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom streamlink.buffers import RingBuffer\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.stream import Stream, StreamIO\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n 
r\"https?://twitcasting\\.tv/(?P<channel>[^/]+)\"\n))\nclass TwitCasting(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"Password for private Twitcasting streams.\"\n )\n )\n _STREAM_INFO_URL = \"https://twitcasting.tv/streamserver.php?target={channel}&mode=client\"\n _STREAM_REAL_URL = \"{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}\"\n\n _STREAM_INFO_SCHEMA = validate.Schema({\n validate.optional(\"movie\"): {\n \"id\": int,\n \"live\": bool\n },\n validate.optional(\"fmp4\"): {\n \"host\": str,\n \"proto\": str,\n \"source\": bool,\n \"mobilesource\": bool\n }\n })\n\n def __init__(self, url):\n super().__init__(url)\n self.channel = self.match.group(\"channel\")\n\n def _get_streams(self):\n stream_info = self._get_stream_info()\n log.debug(f\"Live stream info: {stream_info}\")\n\n if not stream_info.get(\"movie\") or not stream_info[\"movie\"][\"live\"]:\n raise PluginError(\"The live stream is offline\")\n\n if not stream_info.get(\"fmp4\"):\n raise PluginError(\"Login required\")\n\n # Keys are already validated by schema above\n proto = stream_info[\"fmp4\"][\"proto\"]\n host = stream_info[\"fmp4\"][\"host\"]\n movie_id = stream_info[\"movie\"][\"id\"]\n\n if stream_info[\"fmp4\"][\"source\"]:\n mode = \"main\" # High quality\n elif stream_info[\"fmp4\"][\"mobilesource\"]:\n mode = \"mobilesource\" # Medium quality\n else:\n mode = \"base\" # Low quality\n\n if (proto == '') or (host == '') or (not movie_id):\n raise PluginError(f\"No stream available for user {self.channel}\")\n\n real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)\n\n password = self.options.get(\"password\")\n if password is not None:\n password_hash = hashlib.md5(password.encode()).hexdigest()\n real_stream_url = update_qsd(real_stream_url, {\"word\": password_hash})\n\n log.debug(f\"Real stream url: {real_stream_url}\")\n\n return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}\n\n def _get_stream_info(self):\n url = self._STREAM_INFO_URL.format(channel=self.channel)\n res = self.session.http.get(url)\n return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)\n\n\nclass TwitCastingWsClient(WebsocketClient):\n def __init__(self, buffer: RingBuffer, *args, **kwargs):\n self.buffer = buffer\n super().__init__(*args, **kwargs)\n\n def on_close(self, *args, **kwargs):\n super().on_close(*args, **kwargs)\n self.buffer.close()\n\n def on_data(self, wsapp, data, data_type, cont):\n if data_type == self.OPCODE_TEXT:\n return\n\n try:\n self.buffer.write(data)\n except Exception as err:\n log.error(err)\n self.close()\n\n\nclass TwitCastingReader(StreamIO):\n def __init__(self, stream: \"TwitCastingStream\", timeout=None):\n super().__init__()\n self.session = stream.session\n self.stream = stream\n self.timeout = timeout or self.session.options.get(\"stream-timeout\")\n\n buffer_size = self.session.get_option(\"ringbuffer-size\")\n self.buffer = RingBuffer(buffer_size)\n\n self.wsclient = TwitCastingWsClient(\n self.buffer,\n stream.session,\n stream.url,\n origin=\"https://twitcasting.tv/\"\n )\n\n def open(self):\n self.wsclient.start()\n\n def close(self):\n self.wsclient.close()\n self.buffer.close()\n\n def read(self, size):\n return self.buffer.read(\n size,\n block=self.wsclient.is_alive(),\n timeout=self.timeout\n )\n\n\nclass TwitCastingStream(Stream):\n def __init__(self, session, url):\n super().__init__(session)\n self.url = 
url\n\n def to_url(self):\n return self.url\n\n def open(self):\n reader = TwitCastingReader(self)\n reader.open()\n return reader\n\n\n__plugin__ = TwitCasting\n", "path": "src/streamlink/plugins/twitcasting.py"}]}
| 3,527 | 110 |
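The record above (cut off at the top of this page) patches streamlink's TwitCasting plugin: in the fixed `on_data`, text-type websocket frames are skipped instead of being re-encoded to bytes and written into the media buffer, as the before/after files show. A standalone sketch of that behaviour — the opcode constants and sample frames below are invented for illustration; this is not the plugin class itself:

```python
# Standalone sketch: only binary websocket frames reach the stream buffer.
OPCODE_TEXT, OPCODE_BINARY = 0x1, 0x2   # conventional websocket opcodes (illustrative)

def on_data(buffer, data, data_type):
    if data_type == OPCODE_TEXT:
        return                          # status/chat frames are not part of the media stream
    buffer.append(data)                 # binary frames carry the fMP4 payload

buf = []
for opcode, payload in [(OPCODE_TEXT, '{"type": "status"}'), (OPCODE_BINARY, b"\x00fmp4")]:
    on_data(buf, payload, opcode)
print(buf)   # [b'\x00fmp4']
```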
| gh_patches_debug_19241 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2240 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not overwrite global `bytes` in code or examples
It looks like there's a few places we overwrite `bytes` (the type identifier) with a local variable.
```
$ git grep -E -e '^ +bytes *=' -- '*.py'
pwnlib/commandline/disasm.py:81: bytes = disasm(dat, vma=safeeval.const(args.address), instructions=False, offset=False)
pwnlib/commandline/elfpatch.py:29: bytes = unhex(a.bytes)
pwnlib/elf/elf.py:195: bytes = 4
```
And in a few cases we do it in tests, which could have cross-test impact if the global state isn't reset (hint: it isn't).
```
~/pwntools $ git grep -E -e '^ +>>> bytes *=' -- '*.py'
pwnlib/runner.py:42: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
pwnlib/runner.py:48: >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
pwnlib/runner.py:87: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
```
--- END ISSUE ---
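For context on why the doctest occurrences matter: doctest runs all examples of a docstring in one shared namespace, so rebinding `bytes` shadows the builtin type for every later statement in that test (and, depending on how the runner shares globals, potentially for other tests as well). A minimal, self-contained illustration — this is not pwntools code:

```python
def demo():
    """
    >>> bytes = b"1234"   # shadows the builtin type from here on
    >>> len(bytes)
    4
    >>> bytes(2)          # the builtin type is no longer reachable by name
    Traceback (most recent call last):
        ...
    TypeError: 'bytes' object is not callable
    """


if __name__ == "__main__":
    import doctest
    doctest.testmod()
```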
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/runner.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import os
5 import tempfile
6
7 from pwnlib.context import LocalContext
8 from pwnlib.elf import ELF
9 from pwnlib.tubes.process import process
10
11 __all__ = ['run_assembly', 'run_shellcode', 'run_assembly_exitcode', 'run_shellcode_exitcode']
12
13 @LocalContext
14 def run_assembly(assembly):
15 """
16 Given an assembly listing, assemble and execute it.
17
18 Returns:
19
20 A :class:`pwnlib.tubes.process.process` tube to interact with the process.
21
22 Example:
23
24 >>> p = run_assembly('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
25 >>> p.wait_for_close()
26 >>> p.poll()
27 3
28
29 >>> p = run_assembly('mov r0, #12; mov r7, #1; svc #0', arch='arm')
30 >>> p.wait_for_close()
31 >>> p.poll()
32 12
33 """
34 return ELF.from_assembly(assembly).process()
35
36 @LocalContext
37 def run_shellcode(bytes, **kw):
38 """Given assembled machine code bytes, execute them.
39
40 Example:
41
42 >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
43 >>> p = run_shellcode(bytes)
44 >>> p.wait_for_close()
45 >>> p.poll()
46 3
47
48 >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
49 >>> p = run_shellcode(bytes, arch='arm')
50 >>> p.wait_for_close()
51 >>> p.poll()
52 12
53 """
54 return ELF.from_bytes(bytes, **kw).process()
55
56 @LocalContext
57 def run_assembly_exitcode(assembly):
58 """
59 Given an assembly listing, assemble and execute it, and wait for
60 the process to die.
61
62 Returns:
63
64 The exit code of the process.
65
66 Example:
67
68 >>> run_assembly_exitcode('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
69 3
70 """
71 p = run_assembly(assembly)
72 p.wait_for_close()
73 return p.poll()
74
75 @LocalContext
76 def run_shellcode_exitcode(bytes):
77 """
78 Given assembled machine code bytes, execute them, and wait for
79 the process to die.
80
81 Returns:
82
83 The exit code of the process.
84
85 Example:
86
87 >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
88 >>> run_shellcode_exitcode(bytes)
89 3
90 """
91 p = run_shellcode(bytes)
92 p.wait_for_close()
93 return p.poll()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/runner.py b/pwnlib/runner.py
--- a/pwnlib/runner.py
+++ b/pwnlib/runner.py
@@ -39,14 +39,14 @@
Example:
- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
- >>> p = run_shellcode(bytes)
+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
+ >>> p = run_shellcode(insn_bytes)
>>> p.wait_for_close()
>>> p.poll()
3
- >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
- >>> p = run_shellcode(bytes, arch='arm')
+ >>> insn_bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
+ >>> p = run_shellcode(insn_bytes, arch='arm')
>>> p.wait_for_close()
>>> p.poll()
12
@@ -84,8 +84,8 @@
Example:
- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
- >>> run_shellcode_exitcode(bytes)
+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
+ >>> run_shellcode_exitcode(insn_bytes)
3
"""
p = run_shellcode(bytes)
|
{"golden_diff": "diff --git a/pwnlib/runner.py b/pwnlib/runner.py\n--- a/pwnlib/runner.py\n+++ b/pwnlib/runner.py\n@@ -39,14 +39,14 @@\n \n Example:\n \n- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n- >>> p = run_shellcode(bytes)\n+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n+ >>> p = run_shellcode(insn_bytes)\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n \n- >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n- >>> p = run_shellcode(bytes, arch='arm')\n+ >>> insn_bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n+ >>> p = run_shellcode(insn_bytes, arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n@@ -84,8 +84,8 @@\n \n Example:\n \n- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n- >>> run_shellcode_exitcode(bytes)\n+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n+ >>> run_shellcode_exitcode(insn_bytes)\n 3\n \"\"\"\n p = run_shellcode(bytes)\n", "issue": "Do not overwrite global `bytes` in code or examples\nIt looks like there's a few places we overwrite `bytes` (the type identifier) with a local variable.\r\n\r\n```\r\n$ git grep -E -e '^ +bytes *=' -- '*.py'\r\npwnlib/commandline/disasm.py:81: bytes = disasm(dat, vma=safeeval.const(args.address), instructions=False, offset=False)\r\npwnlib/commandline/elfpatch.py:29: bytes = unhex(a.bytes)\r\npwnlib/elf/elf.py:195: bytes = 4\r\n```\r\n\r\nAnd a few cases we do it in tests, which could have cross-test impact if the global state isn't reset (hint: it isn't).\r\n\r\n```\r\n~/pwntools $ git grep -E -e '^ +>>> bytes *=' -- '*.py'\r\npwnlib/runner.py:42: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\r\npwnlib/runner.py:48: >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\r\npwnlib/runner.py:87: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport tempfile\n\nfrom pwnlib.context import LocalContext\nfrom pwnlib.elf import ELF\nfrom pwnlib.tubes.process import process\n\n__all__ = ['run_assembly', 'run_shellcode', 'run_assembly_exitcode', 'run_shellcode_exitcode']\n\n@LocalContext\ndef run_assembly(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it.\n\n Returns:\n\n A :class:`pwnlib.tubes.process.process` tube to interact with the process.\n\n Example:\n\n >>> p = run_assembly('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> p = run_assembly('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_assembly(assembly).process()\n\n@LocalContext\ndef run_shellcode(bytes, **kw):\n \"\"\"Given assembled machine code bytes, execute them.\n\n Example:\n\n >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p = run_shellcode(bytes)\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p = run_shellcode(bytes, arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_bytes(bytes, **kw).process()\n\n@LocalContext\ndef run_assembly_exitcode(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> run_assembly_exitcode('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n 3\n \"\"\"\n p = run_assembly(assembly)\n p.wait_for_close()\n return 
p.poll()\n\n@LocalContext\ndef run_shellcode_exitcode(bytes):\n \"\"\"\n Given assembled machine code bytes, execute them, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> run_shellcode_exitcode(bytes)\n 3\n \"\"\"\n p = run_shellcode(bytes)\n p.wait_for_close()\n return p.poll()\n", "path": "pwnlib/runner.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport tempfile\n\nfrom pwnlib.context import LocalContext\nfrom pwnlib.elf import ELF\nfrom pwnlib.tubes.process import process\n\n__all__ = ['run_assembly', 'run_shellcode', 'run_assembly_exitcode', 'run_shellcode_exitcode']\n\n@LocalContext\ndef run_assembly(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it.\n\n Returns:\n\n A :class:`pwnlib.tubes.process.process` tube to interact with the process.\n\n Example:\n\n >>> p = run_assembly('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> p = run_assembly('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_assembly(assembly).process()\n\n@LocalContext\ndef run_shellcode(bytes, **kw):\n \"\"\"Given assembled machine code bytes, execute them.\n\n Example:\n\n >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p = run_shellcode(insn_bytes)\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> insn_bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p = run_shellcode(insn_bytes, arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_bytes(bytes, **kw).process()\n\n@LocalContext\ndef run_assembly_exitcode(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> run_assembly_exitcode('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n 3\n \"\"\"\n p = run_assembly(assembly)\n p.wait_for_close()\n return p.poll()\n\n@LocalContext\ndef run_shellcode_exitcode(bytes):\n \"\"\"\n Given assembled machine code bytes, execute them, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> run_shellcode_exitcode(insn_bytes)\n 3\n \"\"\"\n p = run_shellcode(bytes)\n p.wait_for_close()\n return p.poll()\n", "path": "pwnlib/runner.py"}]}
| 1,355 | 364 |
gh_patches_debug_35111
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-2854
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
King Island: battery never seems to discharge
I've been keeping an eye on AUS-TAS-KI since it was added to the map. Charging works fine, discharging doesn't show up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/AUS_TAS_KI.py`
Content:
```
1 # Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456
2 # Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636
3 # A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island
4 # As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html
5 # About the data, the feed we get seems to be counters with a 2 seconds interval.
6 # That means that if we fetch these counters every 15 minutes, we are only reading "instantaneous" meters that could differ from the total quantity of energies at play. To get the very exact data, we would need to have a parser running constantly to collect those 2-sec interval counters.
7
8 import asyncio
9 import json
10 import logging
11 import arrow
12 from signalr import Connection
13 from requests import Session
14
15 class SignalR:
16 def __init__(self, url):
17 self.url = url
18
19 def update_res(self, msg):
20 if (msg != {}):
21 self.res = msg
22
23 def get_value(self, hub, method):
24 self.res = {}
25 with Session() as session:
26 #create a connection
27 connection = Connection(self.url, session)
28 chat = connection.register_hub(hub)
29 chat.client.on(method, self.update_res)
30 connection.start()
31 connection.wait(3)
32 connection.close()
33 return self.res
34
35 def parse_payload(logger, payload):
36 technologies_parsed = {}
37 if not 'technologies' in payload:
38 raise KeyError(
39 f"No 'technologies' in payload\n"
40 f"serie : {json.dumps(payload)}"
41 )
42 else:
43 logger.debug(f"serie : {json.dumps(payload)}")
44 for technology in payload['technologies']:
45 assert technology['unit'] == 'kW'
46 # The upstream API gives us kW, we need MW
47 technologies_parsed[technology['id']] = int(technology['value'])/1000
48 logger.debug(f"production : {json.dumps(technologies_parsed)}")
49
50 biodiesel_percent = payload['biodiesel']['percent']
51
52 return technologies_parsed, biodiesel_percent
53
54 # Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid
55 def format_storage_techs(technologies_parsed):
56 storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']
57 battery_production = storage_techs if storage_techs > 0 else 0
58 battery_storage = storage_techs if storage_techs < 0 else 0
59
60 return battery_production, battery_storage
61
62 def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
63
64 if target_datetime is not None:
65 raise NotImplementedError('The datasource currently implemented is only real time')
66
67 payload = SignalR("https://data.ajenti.com.au/live/signalr").get_value("TagHub", "Dashboard")
68 technologies_parsed, biodiesel_percent = parse_payload(logger, payload)
69 battery_production, battery_storage = format_storage_techs(technologies_parsed)
70 return {
71 'zoneKey': zone_key,
72 'datetime': arrow.now(tz='Australia/Currie').datetime,
73 'production': {
74 'battery discharge': battery_production,
75 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,
76 'coal': 0,
77 'gas': 0,
78 'hydro': 0,
79 'nuclear': 0,
80 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,
81 'solar': technologies_parsed['solar'],
82 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption
83 'geothermal': 0,
84 'unknown': 0
85 },
86 'storage': {
87 'battery': battery_storage*-1
88 },
89 'source': 'https://data.ajenti.com.au/KIREIP/index.html'
90 }
91
92 if __name__ == '__main__':
93 print(fetch_production())
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py
--- a/parsers/AUS_TAS_KI.py
+++ b/parsers/AUS_TAS_KI.py
@@ -52,12 +52,10 @@
return technologies_parsed, biodiesel_percent
# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid
-def format_storage_techs(technologies_parsed):
+def sum_storage_techs(technologies_parsed):
storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']
- battery_production = storage_techs if storage_techs > 0 else 0
- battery_storage = storage_techs if storage_techs < 0 else 0
- return battery_production, battery_storage
+ return storage_techs
def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
@@ -66,12 +64,11 @@
payload = SignalR("https://data.ajenti.com.au/live/signalr").get_value("TagHub", "Dashboard")
technologies_parsed, biodiesel_percent = parse_payload(logger, payload)
- battery_production, battery_storage = format_storage_techs(technologies_parsed)
+ storage_techs = sum_storage_techs(technologies_parsed)
return {
'zoneKey': zone_key,
'datetime': arrow.now(tz='Australia/Currie').datetime,
'production': {
- 'battery discharge': battery_production,
'biomass': technologies_parsed['diesel']*biodiesel_percent/100,
'coal': 0,
'gas': 0,
@@ -84,9 +81,9 @@
'unknown': 0
},
'storage': {
- 'battery': battery_storage*-1
+ 'battery': storage_techs*-1 #Somewhat counterintuitively,to ElectricityMap positive means charging and negative means discharging
},
- 'source': 'https://data.ajenti.com.au/KIREIP/index.html'
+ 'source': 'https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island' #Iframe: https://data.ajenti.com.au/KIREIP/index.html
}
if __name__ == '__main__':
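The core of the patch above is a sign convention: the upstream feed reports battery/flywheel as positive when feeding the grid and negative when charging, while ElectricityMap's `storage` field is positive for charging and negative for discharging — hence the single `* -1`. A standalone numeric sketch (the MW values are invented):

```python
# Sketch of the sign flip only; not the parser itself.
def storage_field(technologies_parsed):
    # upstream: > 0 means battery/flywheel feed the grid, < 0 means they charge
    storage_techs = technologies_parsed["battery"] + technologies_parsed["flywheel"]
    # ElectricityMap: positive storage = charging, negative storage = discharging
    return storage_techs * -1

print(storage_field({"battery": 0.4, "flywheel": 0.1}))    # -0.5  -> shown as discharging
print(storage_field({"battery": -0.3, "flywheel": 0.0}))   # 0.3   -> shown as charging
```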
|
{"golden_diff": "diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py\n--- a/parsers/AUS_TAS_KI.py\n+++ b/parsers/AUS_TAS_KI.py\n@@ -52,12 +52,10 @@\n return technologies_parsed, biodiesel_percent\n \n # Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\n-def format_storage_techs(technologies_parsed):\n+def sum_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n- battery_production = storage_techs if storage_techs > 0 else 0\n- battery_storage = storage_techs if storage_techs < 0 else 0\n \n- return battery_production, battery_storage\n+ return storage_techs\n \n def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n \n@@ -66,12 +64,11 @@\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n- battery_production, battery_storage = format_storage_techs(technologies_parsed)\n+ storage_techs = sum_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n- 'battery discharge': battery_production,\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n@@ -84,9 +81,9 @@\n 'unknown': 0\n },\n 'storage': {\n- 'battery': battery_storage*-1\n+ 'battery': storage_techs*-1 #Somewhat counterintuitively,to ElectricityMap positive means charging and negative means discharging\n },\n- 'source': 'https://data.ajenti.com.au/KIREIP/index.html'\n+ 'source': 'https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island' #Iframe: https://data.ajenti.com.au/KIREIP/index.html\n }\n \n if __name__ == '__main__':\n", "issue": "King Island: battery never seems to discharge \nI've been keeping an eye on AUS-TAS-KI since it was added to the map. Charging works fine, discharging doesn't show up.\n", "before_files": [{"content": "# Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456\n# Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636\n# A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island\n# As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html\n# About the data, the feed we get seems to be counters with a 2 seconds interval.\n# That means that if we fetch these counters every 15 minutes, we only are reading \"instantaneous\" metters that could differ from the total quantity of energies at play. 
To get the very exact data, we would need to have a parser running constanty to collect those 2-sec interval counters.\n\nimport asyncio\nimport json\nimport logging\nimport arrow\nfrom signalr import Connection\nfrom requests import Session\n\nclass SignalR:\n def __init__(self, url):\n self.url = url\n \n def update_res(self, msg):\n if (msg != {}):\n self.res = msg\n\n def get_value(self, hub, method):\n self.res = {}\n with Session() as session:\n #create a connection\n connection = Connection(self.url, session)\n chat = connection.register_hub(hub)\n chat.client.on(method, self.update_res)\n connection.start()\n connection.wait(3)\n connection.close()\n return self.res\n \ndef parse_payload(logger, payload):\n technologies_parsed = {}\n if not 'technologies' in payload:\n raise KeyError(\n f\"No 'technologies' in payload\\n\"\n f\"serie : {json.dumps(payload)}\"\n )\n else:\n logger.debug(f\"serie : {json.dumps(payload)}\")\n for technology in payload['technologies']:\n assert technology['unit'] == 'kW'\n # The upstream API gives us kW, we need MW\n technologies_parsed[technology['id']] = int(technology['value'])/1000\n logger.debug(f\"production : {json.dumps(technologies_parsed)}\")\n\n biodiesel_percent = payload['biodiesel']['percent']\n\n return technologies_parsed, biodiesel_percent\n\n# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\ndef format_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n battery_production = storage_techs if storage_techs > 0 else 0\n battery_storage = storage_techs if storage_techs < 0 else 0\n\n return battery_production, battery_storage\n\ndef fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n\n if target_datetime is not None:\n raise NotImplementedError('The datasource currently implemented is only real time')\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n battery_production, battery_storage = format_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n 'battery discharge': battery_production,\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,\n 'solar': technologies_parsed['solar'],\n 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption\n 'geothermal': 0,\n 'unknown': 0\n },\n 'storage': {\n 'battery': battery_storage*-1\n },\n 'source': 'https://data.ajenti.com.au/KIREIP/index.html'\n }\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AUS_TAS_KI.py"}], "after_files": [{"content": "# Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456\n# Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636\n# A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island\n# As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html\n# About the data, the feed we get seems to 
be counters with a 2 seconds interval.\n# That means that if we fetch these counters every 15 minutes, we only are reading \"instantaneous\" metters that could differ from the total quantity of energies at play. To get the very exact data, we would need to have a parser running constanty to collect those 2-sec interval counters.\n\nimport asyncio\nimport json\nimport logging\nimport arrow\nfrom signalr import Connection\nfrom requests import Session\n\nclass SignalR:\n def __init__(self, url):\n self.url = url\n \n def update_res(self, msg):\n if (msg != {}):\n self.res = msg\n\n def get_value(self, hub, method):\n self.res = {}\n with Session() as session:\n #create a connection\n connection = Connection(self.url, session)\n chat = connection.register_hub(hub)\n chat.client.on(method, self.update_res)\n connection.start()\n connection.wait(3)\n connection.close()\n return self.res\n \ndef parse_payload(logger, payload):\n technologies_parsed = {}\n if not 'technologies' in payload:\n raise KeyError(\n f\"No 'technologies' in payload\\n\"\n f\"serie : {json.dumps(payload)}\"\n )\n else:\n logger.debug(f\"serie : {json.dumps(payload)}\")\n for technology in payload['technologies']:\n assert technology['unit'] == 'kW'\n # The upstream API gives us kW, we need MW\n technologies_parsed[technology['id']] = int(technology['value'])/1000\n logger.debug(f\"production : {json.dumps(technologies_parsed)}\")\n\n biodiesel_percent = payload['biodiesel']['percent']\n\n return technologies_parsed, biodiesel_percent\n\n# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\ndef sum_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n\n return storage_techs\n\ndef fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n\n if target_datetime is not None:\n raise NotImplementedError('The datasource currently implemented is only real time')\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n storage_techs = sum_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,\n 'solar': technologies_parsed['solar'],\n 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption\n 'geothermal': 0,\n 'unknown': 0\n },\n 'storage': {\n 'battery': storage_techs*-1 #Somewhat counterintuitively,to ElectricityMap positive means charging and negative means discharging\n },\n 'source': 'https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island' #Iframe: https://data.ajenti.com.au/KIREIP/index.html\n }\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AUS_TAS_KI.py"}]}
| 1,447 | 540 |
| gh_patches_debug_3443 | rasdani/github-patches | git_diff | crytic__slither-1971 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggestion to make the recommendation in the `msgvalue-inside-a-loop` detector wiki clearer
### Describe the desired feature
Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#msgvalue-inside-a-loop
This is the current recommendation for the `msgvalue-inside-a-loop` detector:
```solidity
Track msg.value through a local variable and decrease its amount on every iteration/usage.
```
This is a vague recommendation - it does not address the issue head-on, i.e., what mathematical technique the developer should use to remedy the bug.
My suggestions:
1. Recommend dividing by the number of `receivers`
2. Recommend providing an explicit array of amounts alongside the `receivers` array, and check that the sum of each element in that array matches `msg.value`
--- END ISSUE ---
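Suggestion 2 comes down to an accounting invariant: the caller passes one explicit amount per receiver and the call is rejected unless those amounts sum exactly to `msg.value`. A Python model of that invariant, purely for illustration (the actual fix would of course be written in Solidity):

```python
# Illustration only: models the invariant behind suggestion 2, not contract code.
def distribute(msg_value, receivers, amounts):
    if len(receivers) != len(amounts):
        raise ValueError("one amount per receiver")
    if sum(amounts) != msg_value:
        raise ValueError("amounts must sum to msg.value")
    balances = {}
    for receiver, amount in zip(receivers, amounts):
        balances[receiver] = balances.get(receiver, 0) + amount
    return balances

print(distribute(10, ["a", "b"], [6, 4]))   # {'a': 6, 'b': 4}
# distribute(10, ["a", "b"], [10, 10])      # rejected: would credit 20 for a payment of 10
```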
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/statements/msg_value_in_loop.py`
Content:
```
1 from typing import List, Optional
2 from slither.core.cfg.node import NodeType, Node
3 from slither.detectors.abstract_detector import (
4 AbstractDetector,
5 DetectorClassification,
6 DETECTOR_INFO,
7 )
8 from slither.slithir.operations import InternalCall
9 from slither.core.declarations import SolidityVariableComposed, Contract
10 from slither.utils.output import Output
11
12
13 def detect_msg_value_in_loop(contract: Contract) -> List[Node]:
14 results: List[Node] = []
15 for f in contract.functions_entry_points:
16 if f.is_implemented and f.payable:
17 msg_value_in_loop(f.entry_point, 0, [], results)
18 return results
19
20
21 def msg_value_in_loop(
22 node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]
23 ) -> None:
24
25 if node is None:
26 return
27
28 if node in visited:
29 return
30 # shared visited
31 visited.append(node)
32
33 if node.type == NodeType.STARTLOOP:
34 in_loop_counter += 1
35 elif node.type == NodeType.ENDLOOP:
36 in_loop_counter -= 1
37
38 for ir in node.all_slithir_operations():
39 if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read:
40 results.append(ir.node)
41 if isinstance(ir, (InternalCall)):
42 msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)
43
44 for son in node.sons:
45 msg_value_in_loop(son, in_loop_counter, visited, results)
46
47
48 class MsgValueInLoop(AbstractDetector):
49 """
50 Detect the use of msg.value inside a loop
51 """
52
53 ARGUMENT = "msg-value-loop"
54 HELP = "msg.value inside a loop"
55 IMPACT = DetectorClassification.HIGH
56 CONFIDENCE = DetectorClassification.MEDIUM
57
58 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop"
59
60 WIKI_TITLE = "`msg.value` inside a loop"
61 WIKI_DESCRIPTION = "Detect the use of `msg.value` inside a loop."
62
63 # region wiki_exploit_scenario
64 WIKI_EXPLOIT_SCENARIO = """
65 ```solidity
66 contract MsgValueInLoop{
67
68 mapping (address => uint256) balances;
69
70 function bad(address[] memory receivers) public payable {
71 for (uint256 i=0; i < receivers.length; i++) {
72 balances[receivers[i]] += msg.value;
73 }
74 }
75
76 }
77 ```
78 """
79 # endregion wiki_exploit_scenario
80
81 WIKI_RECOMMENDATION = """
82 Track msg.value through a local variable and decrease its amount on every iteration/usage.
83 """
84
85 def _detect(self) -> List[Output]:
86 """"""
87 results: List[Output] = []
88 for c in self.compilation_unit.contracts_derived:
89 values = detect_msg_value_in_loop(c)
90 for node in values:
91 func = node.function
92
93 info: DETECTOR_INFO = [func, " use msg.value in a loop: ", node, "\n"]
94 res = self.generate_result(info)
95 results.append(res)
96
97 return results
98
```
--- END FILES ---
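For orientation, `msg_value_in_loop` above boils down to: walk the CFG, keep a loop-nesting counter (incremented at STARTLOOP nodes, decremented at ENDLOOP), flag any operation that reads `msg.value` while the counter is positive, and recurse into internal calls. A toy, flattened model of that bookkeeping — the node kinds and list below are invented, not slither's real node/IR types:

```python
def reads_of_msg_value_inside_loops(nodes):
    depth, hits = 0, []
    for index, (kind, reads) in enumerate(nodes):
        if kind == "STARTLOOP":
            depth += 1
        elif kind == "ENDLOOP":
            depth -= 1
        elif "msg.value" in reads and depth > 0:
            hits.append(index)
    return hits

toy_cfg = [
    ("EXPRESSION", {"msg.sender"}),   # outside any loop: not flagged
    ("STARTLOOP", set()),
    ("EXPRESSION", {"msg.value"}),    # inside the loop: flagged
    ("ENDLOOP", set()),
]
print(reads_of_msg_value_inside_loops(toy_cfg))  # [2]
```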
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py
--- a/slither/detectors/statements/msg_value_in_loop.py
+++ b/slither/detectors/statements/msg_value_in_loop.py
@@ -79,7 +79,7 @@
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = """
-Track msg.value through a local variable and decrease its amount on every iteration/usage.
+Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.
"""
def _detect(self) -> List[Output]:
|
{"golden_diff": "diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py\n--- a/slither/detectors/statements/msg_value_in_loop.py\n+++ b/slither/detectors/statements/msg_value_in_loop.py\n@@ -79,7 +79,7 @@\n # endregion wiki_exploit_scenario\n \n WIKI_RECOMMENDATION = \"\"\"\n-Track msg.value through a local variable and decrease its amount on every iteration/usage.\n+Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.\n \"\"\"\n \n def _detect(self) -> List[Output]:\n", "issue": "Suggestion to make the recommendation in the `msgvalue-inside-a-loop` detector wiki clearer\n### Describe the desired feature\n\nReference: https://github.com/crytic/slither/wiki/Detector-Documentation#msgvalue-inside-a-loop\r\n\r\nThis is the current recommendation for the `msgvalue-inside-a-loop` detector:\r\n\r\n```solidity\r\nTrack msg.value through a local variable and decrease its amount on every iteration/usage.\r\n```\r\n\r\nThis is a vague recommendation - it does not address the issue head-on, i.e., what mathematical technique the developer should use to remedy the bug.\r\n\r\nMy suggestions:\r\n\r\n1. Recommend dividing by the number of `receivers`\r\n2. Recommend providing an explicit array of amounts alongside the `receivers` array, and check that the sum of each element in that array matches `msg.value`\n", "before_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n 
balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nTrack msg.value through a local variable and decrease its amount on every iteration/usage.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}], "after_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nProvide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}]}
| 1,317 | 146 |
| gh_patches_debug_11982 | rasdani/github-patches | git_diff | quantumlib__Cirq-841 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add __repr__ to XmonDevice
See e.g. `TrialResult` or `Circuit` for how these should work.
```python
def __repr__(self):
return ('TrialResult(params={!r}, '
'repetitions={!r}, '
'measurements={!r})').format(self.params,
self.repetitions,
self.measurements)
```
--- END ISSUE ---
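Applying the quoted `TrialResult` pattern to `XmonDevice` (whose attributes appear in `__init__` in the file below) gives roughly the following sketch; the patch recorded at the end of this entry is essentially this, up to cosmetic spacing:

```python
def __repr__(self):
    return ('XmonDevice(measurement_duration={!r}, '
            'exp_w_duration={!r}, '
            'exp_11_duration={!r}, '
            'qubits={!r})').format(self._measurement_duration,
                                   self._exp_w_duration,
                                   self._exp_z_duration,
                                   sorted(self.qubits))
```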
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/google/xmon_device.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Iterable, cast, Optional, List, TYPE_CHECKING
16
17 from cirq import ops, circuits
18 from cirq.devices import Device
19 from cirq.google import xmon_gates, convert_to_xmon_gates
20 from cirq.google.xmon_gate_extensions import xmon_gate_ext
21 from cirq.devices.grid_qubit import GridQubit
22 from cirq.value import Duration
23
24 from cirq.circuits import TextDiagramDrawer
25
26
27 if TYPE_CHECKING:
28 # pylint: disable=unused-import
29 from typing import Set
30
31
32 class XmonDevice(Device):
33 """A device with qubits placed in a grid. Neighboring qubits can interact.
34 """
35
36 def __init__(self,
37 measurement_duration: Duration,
38 exp_w_duration: Duration,
39 exp_11_duration: Duration,
40 qubits: Iterable[GridQubit]) -> None:
41 """Initializes the description of an xmon device.
42
43 Args:
44 measurement_duration: The maximum duration of a measurement.
45 exp_w_duration: The maximum duration of an ExpW operation.
46 exp_11_duration: The maximum duration of an ExpZ operation.
47 qubits: Qubits on the device, identified by their x, y location.
48 """
49 self._measurement_duration = measurement_duration
50 self._exp_w_duration = exp_w_duration
51 self._exp_z_duration = exp_11_duration
52 self.qubits = frozenset(qubits)
53
54 def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
55 return convert_to_xmon_gates.ConvertToXmonGates().convert(operation)
56
57 def neighbors_of(self, qubit: GridQubit):
58 """Returns the qubits that the given qubit can interact with."""
59 possibles = [
60 GridQubit(qubit.row + 1, qubit.col),
61 GridQubit(qubit.row - 1, qubit.col),
62 GridQubit(qubit.row, qubit.col + 1),
63 GridQubit(qubit.row, qubit.col - 1),
64 ]
65 return [e for e in possibles if e in self.qubits]
66
67 def duration_of(self, operation):
68 if isinstance(operation, ops.GateOperation):
69 g = xmon_gate_ext.try_cast(xmon_gates.XmonGate, operation.gate)
70 if isinstance(g, xmon_gates.Exp11Gate):
71 return self._exp_z_duration
72 if isinstance(g, xmon_gates.ExpWGate):
73 return self._exp_w_duration
74 if isinstance(g, xmon_gates.XmonMeasurementGate):
75 return self._measurement_duration
76 if isinstance(g, xmon_gates.ExpZGate):
77 # Z gates are performed in the control software.
78 return Duration()
79 raise ValueError('Unsupported gate type: {!r}'.format(operation))
80
81 def validate_gate(self, gate: ops.Gate):
82 """Raises an error if the given gate isn't allowed.
83
84 Raises:
85 ValueError: Unsupported gate.
86 """
87 if not isinstance(gate, (xmon_gates.Exp11Gate,
88 xmon_gates.ExpWGate,
89 xmon_gates.XmonMeasurementGate,
90 xmon_gates.ExpZGate)):
91 raise ValueError('Unsupported gate type: {!r}'.format(gate))
92
93 def validate_operation(self, operation: ops.Operation):
94 if not isinstance(operation, ops.GateOperation):
95 raise ValueError('Unsupported operation: {!r}'.format(operation))
96
97 self.validate_gate(operation.gate)
98
99 for q in operation.qubits:
100 if not isinstance(q, GridQubit):
101 raise ValueError('Unsupported qubit type: {!r}'.format(q))
102 if q not in self.qubits:
103 raise ValueError('Qubit not on device: {!r}'.format(q))
104
105 if (len(operation.qubits) == 2
106 and not isinstance(operation.gate,
107 xmon_gates.XmonMeasurementGate)):
108 p, q = operation.qubits
109 if not cast(GridQubit, p).is_adjacent(q):
110 raise ValueError(
111 'Non-local interaction: {!r}.'.format(operation))
112
113 def _check_if_exp11_operation_interacts_with_any(
114 self,
115 exp11_op: ops.GateOperation,
116 others: Iterable[ops.GateOperation]) -> bool:
117 return any(self._check_if_exp11_operation_interacts(exp11_op, op)
118 for op in others)
119
120 def _check_if_exp11_operation_interacts(
121 self,
122 exp11_op: ops.GateOperation,
123 other_op: ops.GateOperation) -> bool:
124 if isinstance(other_op.gate, xmon_gates.ExpZGate):
125 return False
126 if isinstance(other_op.gate, xmon_gates.ExpWGate):
127 return False
128
129 return any(cast(GridQubit, q).is_adjacent(cast(GridQubit, p))
130 for q in exp11_op.qubits
131 for p in other_op.qubits)
132
133 def validate_scheduled_operation(self, schedule, scheduled_operation):
134 self.validate_operation(scheduled_operation.operation)
135
136 if isinstance(scheduled_operation.operation.gate,
137 xmon_gates.Exp11Gate):
138 for other in schedule.operations_happening_at_same_time_as(
139 scheduled_operation):
140 if self._check_if_exp11_operation_interacts(
141 cast(ops.GateOperation, scheduled_operation.operation),
142 cast(ops.GateOperation, other.operation)):
143 raise ValueError(
144 'Adjacent Exp11 operations: {} vs {}.'.format(
145 scheduled_operation, other))
146
147 def validate_circuit(self, circuit: circuits.Circuit):
148 super().validate_circuit(circuit)
149 _verify_unique_measurement_keys(circuit.all_operations())
150
151 def validate_moment(self, moment: circuits.Moment):
152 super().validate_moment(moment)
153 for op in moment.operations:
154 if (isinstance(op, ops.GateOperation) and
155 isinstance(op.gate, xmon_gates.Exp11Gate)):
156 for other in moment.operations:
157 if (other is not op and
158 self._check_if_exp11_operation_interacts(
159 cast(ops.GateOperation, op),
160 cast(ops.GateOperation, other))):
161 raise ValueError(
162 'Adjacent Exp11 operations: {}.'.format(moment))
163
164 def can_add_operation_into_moment(self,
165 operation: ops.Operation,
166 moment: circuits.Moment) -> bool:
167 self.validate_moment(moment)
168
169 if not super().can_add_operation_into_moment(operation, moment):
170 return False
171 if (isinstance(operation, ops.GateOperation) and
172 isinstance(operation.gate, xmon_gates.Exp11Gate)):
173 return not self._check_if_exp11_operation_interacts_with_any(
174 cast(ops.GateOperation, operation),
175 cast(Iterable[ops.GateOperation], moment.operations))
176 return True
177
178 def validate_schedule(self, schedule):
179 _verify_unique_measurement_keys(
180 s.operation for s in schedule.scheduled_operations)
181 for scheduled_operation in schedule.scheduled_operations:
182 self.validate_scheduled_operation(schedule, scheduled_operation)
183
184 def at(self, row: int, col: int) -> Optional[GridQubit]:
185 """Returns the qubit at the given position, if there is one, else None.
186 """
187 q = GridQubit(row, col)
188 return q if q in self.qubits else None
189
190 def row(self, row: int) -> List[GridQubit]:
191 """Returns the qubits in the given row, in ascending order."""
192 return sorted(q for q in self.qubits if q.row == row)
193
194 def col(self, col: int) -> List[GridQubit]:
195 """Returns the qubits in the given column, in ascending order."""
196 return sorted(q for q in self.qubits if q.col == col)
197
198 def __str__(self):
199 diagram = TextDiagramDrawer()
200
201 for q in self.qubits:
202 diagram.write(q.col, q.row, str(q))
203 for q2 in self.neighbors_of(q):
204 diagram.grid_line(q.col, q.row, q2.col, q2.row)
205
206 return diagram.render(
207 horizontal_spacing=3,
208 vertical_spacing=2,
209 use_unicode_characters=True)
210
211 def __eq__(self, other):
212 if not isinstance(other, (XmonDevice, type(self))):
213 return NotImplemented
214 return (self._measurement_duration == other._measurement_duration and
215 self._exp_w_duration == other._exp_w_duration and
216 self._exp_z_duration == other._exp_z_duration and
217 self.qubits == other.qubits)
218
219 def __ne__(self, other):
220 return not self == other
221
222 def __hash__(self):
223 return hash((XmonDevice, self._measurement_duration,
224 self._exp_w_duration, self._exp_z_duration, self.qubits))
225
226
227 def _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):
228 seen = set() # type: Set[str]
229 for op in operations:
230 if ops.MeasurementGate.is_measurement(op):
231 key = cast(ops.MeasurementGate,
232 cast(ops.GateOperation, op).gate).key
233 if key in seen:
234 raise ValueError('Measurement key {} repeated'.format(key))
235 seen.add(key)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/google/xmon_device.py b/cirq/google/xmon_device.py
--- a/cirq/google/xmon_device.py
+++ b/cirq/google/xmon_device.py
@@ -195,6 +195,15 @@
"""Returns the qubits in the given column, in ascending order."""
return sorted(q for q in self.qubits if q.col == col)
+ def __repr__(self):
+ return ('XmonDevice(measurement_duration={!r}, '
+ 'exp_w_duration={!r}, '
+ 'exp_11_duration={!r} '
+ 'qubits={!r})').format(self._measurement_duration,
+ self._exp_w_duration,
+ self._exp_z_duration,
+ sorted(self.qubits))
+
def __str__(self):
diagram = TextDiagramDrawer()
|
{"golden_diff": "diff --git a/cirq/google/xmon_device.py b/cirq/google/xmon_device.py\n--- a/cirq/google/xmon_device.py\n+++ b/cirq/google/xmon_device.py\n@@ -195,6 +195,15 @@\n \"\"\"Returns the qubits in the given column, in ascending order.\"\"\"\n return sorted(q for q in self.qubits if q.col == col)\n \n+ def __repr__(self):\n+ return ('XmonDevice(measurement_duration={!r}, '\n+ 'exp_w_duration={!r}, '\n+ 'exp_11_duration={!r} '\n+ 'qubits={!r})').format(self._measurement_duration,\n+ self._exp_w_duration,\n+ self._exp_z_duration,\n+ sorted(self.qubits))\n+\n def __str__(self):\n diagram = TextDiagramDrawer()\n", "issue": "Add __repr__ to XmonDevice\nSee e.g. `TrialResult` or `Circuit` for how these should work.\r\n\r\n```python\r\n def __repr__(self):\r\n return ('TrialResult(params={!r}, '\r\n 'repetitions={!r}, '\r\n 'measurements={!r})').format(self.params,\r\n self.repetitions,\r\n self.measurements)\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Iterable, cast, Optional, List, TYPE_CHECKING\n\nfrom cirq import ops, circuits\nfrom cirq.devices import Device\nfrom cirq.google import xmon_gates, convert_to_xmon_gates\nfrom cirq.google.xmon_gate_extensions import xmon_gate_ext\nfrom cirq.devices.grid_qubit import GridQubit\nfrom cirq.value import Duration\n\nfrom cirq.circuits import TextDiagramDrawer\n\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from typing import Set\n\n\nclass XmonDevice(Device):\n \"\"\"A device with qubits placed in a grid. 
Neighboring qubits can interact.\n \"\"\"\n\n def __init__(self,\n measurement_duration: Duration,\n exp_w_duration: Duration,\n exp_11_duration: Duration,\n qubits: Iterable[GridQubit]) -> None:\n \"\"\"Initializes the description of an xmon device.\n\n Args:\n measurement_duration: The maximum duration of a measurement.\n exp_w_duration: The maximum duration of an ExpW operation.\n exp_11_duration: The maximum duration of an ExpZ operation.\n qubits: Qubits on the device, identified by their x, y location.\n \"\"\"\n self._measurement_duration = measurement_duration\n self._exp_w_duration = exp_w_duration\n self._exp_z_duration = exp_11_duration\n self.qubits = frozenset(qubits)\n\n def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:\n return convert_to_xmon_gates.ConvertToXmonGates().convert(operation)\n\n def neighbors_of(self, qubit: GridQubit):\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n GridQubit(qubit.row + 1, qubit.col),\n GridQubit(qubit.row - 1, qubit.col),\n GridQubit(qubit.row, qubit.col + 1),\n GridQubit(qubit.row, qubit.col - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def duration_of(self, operation):\n if isinstance(operation, ops.GateOperation):\n g = xmon_gate_ext.try_cast(xmon_gates.XmonGate, operation.gate)\n if isinstance(g, xmon_gates.Exp11Gate):\n return self._exp_z_duration\n if isinstance(g, xmon_gates.ExpWGate):\n return self._exp_w_duration\n if isinstance(g, xmon_gates.XmonMeasurementGate):\n return self._measurement_duration\n if isinstance(g, xmon_gates.ExpZGate):\n # Z gates are performed in the control software.\n return Duration()\n raise ValueError('Unsupported gate type: {!r}'.format(operation))\n\n def validate_gate(self, gate: ops.Gate):\n \"\"\"Raises an error if the given gate isn't allowed.\n\n Raises:\n ValueError: Unsupported gate.\n \"\"\"\n if not isinstance(gate, (xmon_gates.Exp11Gate,\n xmon_gates.ExpWGate,\n xmon_gates.XmonMeasurementGate,\n xmon_gates.ExpZGate)):\n raise ValueError('Unsupported gate type: {!r}'.format(gate))\n\n def validate_operation(self, operation: ops.Operation):\n if not isinstance(operation, ops.GateOperation):\n raise ValueError('Unsupported operation: {!r}'.format(operation))\n\n self.validate_gate(operation.gate)\n\n for q in operation.qubits:\n if not isinstance(q, GridQubit):\n raise ValueError('Unsupported qubit type: {!r}'.format(q))\n if q not in self.qubits:\n raise ValueError('Qubit not on device: {!r}'.format(q))\n\n if (len(operation.qubits) == 2\n and not isinstance(operation.gate,\n xmon_gates.XmonMeasurementGate)):\n p, q = operation.qubits\n if not cast(GridQubit, p).is_adjacent(q):\n raise ValueError(\n 'Non-local interaction: {!r}.'.format(operation))\n\n def _check_if_exp11_operation_interacts_with_any(\n self,\n exp11_op: ops.GateOperation,\n others: Iterable[ops.GateOperation]) -> bool:\n return any(self._check_if_exp11_operation_interacts(exp11_op, op)\n for op in others)\n\n def _check_if_exp11_operation_interacts(\n self,\n exp11_op: ops.GateOperation,\n other_op: ops.GateOperation) -> bool:\n if isinstance(other_op.gate, xmon_gates.ExpZGate):\n return False\n if isinstance(other_op.gate, xmon_gates.ExpWGate):\n return False\n\n return any(cast(GridQubit, q).is_adjacent(cast(GridQubit, p))\n for q in exp11_op.qubits\n for p in other_op.qubits)\n\n def validate_scheduled_operation(self, schedule, scheduled_operation):\n self.validate_operation(scheduled_operation.operation)\n\n if 
isinstance(scheduled_operation.operation.gate,\n xmon_gates.Exp11Gate):\n for other in schedule.operations_happening_at_same_time_as(\n scheduled_operation):\n if self._check_if_exp11_operation_interacts(\n cast(ops.GateOperation, scheduled_operation.operation),\n cast(ops.GateOperation, other.operation)):\n raise ValueError(\n 'Adjacent Exp11 operations: {} vs {}.'.format(\n scheduled_operation, other))\n\n def validate_circuit(self, circuit: circuits.Circuit):\n super().validate_circuit(circuit)\n _verify_unique_measurement_keys(circuit.all_operations())\n\n def validate_moment(self, moment: circuits.Moment):\n super().validate_moment(moment)\n for op in moment.operations:\n if (isinstance(op, ops.GateOperation) and\n isinstance(op.gate, xmon_gates.Exp11Gate)):\n for other in moment.operations:\n if (other is not op and\n self._check_if_exp11_operation_interacts(\n cast(ops.GateOperation, op),\n cast(ops.GateOperation, other))):\n raise ValueError(\n 'Adjacent Exp11 operations: {}.'.format(moment))\n\n def can_add_operation_into_moment(self,\n operation: ops.Operation,\n moment: circuits.Moment) -> bool:\n self.validate_moment(moment)\n\n if not super().can_add_operation_into_moment(operation, moment):\n return False\n if (isinstance(operation, ops.GateOperation) and\n isinstance(operation.gate, xmon_gates.Exp11Gate)):\n return not self._check_if_exp11_operation_interacts_with_any(\n cast(ops.GateOperation, operation),\n cast(Iterable[ops.GateOperation], moment.operations))\n return True\n\n def validate_schedule(self, schedule):\n _verify_unique_measurement_keys(\n s.operation for s in schedule.scheduled_operations)\n for scheduled_operation in schedule.scheduled_operations:\n self.validate_scheduled_operation(schedule, scheduled_operation)\n\n def at(self, row: int, col: int) -> Optional[GridQubit]:\n \"\"\"Returns the qubit at the given position, if there is one, else None.\n \"\"\"\n q = GridQubit(row, col)\n return q if q in self.qubits else None\n\n def row(self, row: int) -> List[GridQubit]:\n \"\"\"Returns the qubits in the given row, in ascending order.\"\"\"\n return sorted(q for q in self.qubits if q.row == row)\n\n def col(self, col: int) -> List[GridQubit]:\n \"\"\"Returns the qubits in the given column, in ascending order.\"\"\"\n return sorted(q for q in self.qubits if q.col == col)\n\n def __str__(self):\n diagram = TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.col, q.row, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.col, q.row, q2.col, q2.row)\n\n return diagram.render(\n horizontal_spacing=3,\n vertical_spacing=2,\n use_unicode_characters=True)\n\n def __eq__(self, other):\n if not isinstance(other, (XmonDevice, type(self))):\n return NotImplemented\n return (self._measurement_duration == other._measurement_duration and\n self._exp_w_duration == other._exp_w_duration and\n self._exp_z_duration == other._exp_z_duration and\n self.qubits == other.qubits)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((XmonDevice, self._measurement_duration,\n self._exp_w_duration, self._exp_z_duration, self.qubits))\n\n\ndef _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):\n seen = set() # type: Set[str]\n for op in operations:\n if ops.MeasurementGate.is_measurement(op):\n key = cast(ops.MeasurementGate,\n cast(ops.GateOperation, op).gate).key\n if key in seen:\n raise ValueError('Measurement key {} repeated'.format(key))\n seen.add(key)\n", "path": "cirq/google/xmon_device.py"}], 
"after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Iterable, cast, Optional, List, TYPE_CHECKING\n\nfrom cirq import ops, circuits\nfrom cirq.devices import Device\nfrom cirq.google import xmon_gates, convert_to_xmon_gates\nfrom cirq.google.xmon_gate_extensions import xmon_gate_ext\nfrom cirq.devices.grid_qubit import GridQubit\nfrom cirq.value import Duration\n\nfrom cirq.circuits import TextDiagramDrawer\n\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n from typing import Set\n\n\nclass XmonDevice(Device):\n \"\"\"A device with qubits placed in a grid. Neighboring qubits can interact.\n \"\"\"\n\n def __init__(self,\n measurement_duration: Duration,\n exp_w_duration: Duration,\n exp_11_duration: Duration,\n qubits: Iterable[GridQubit]) -> None:\n \"\"\"Initializes the description of an xmon device.\n\n Args:\n measurement_duration: The maximum duration of a measurement.\n exp_w_duration: The maximum duration of an ExpW operation.\n exp_11_duration: The maximum duration of an ExpZ operation.\n qubits: Qubits on the device, identified by their x, y location.\n \"\"\"\n self._measurement_duration = measurement_duration\n self._exp_w_duration = exp_w_duration\n self._exp_z_duration = exp_11_duration\n self.qubits = frozenset(qubits)\n\n def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:\n return convert_to_xmon_gates.ConvertToXmonGates().convert(operation)\n\n def neighbors_of(self, qubit: GridQubit):\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n GridQubit(qubit.row + 1, qubit.col),\n GridQubit(qubit.row - 1, qubit.col),\n GridQubit(qubit.row, qubit.col + 1),\n GridQubit(qubit.row, qubit.col - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def duration_of(self, operation):\n if isinstance(operation, ops.GateOperation):\n g = xmon_gate_ext.try_cast(xmon_gates.XmonGate, operation.gate)\n if isinstance(g, xmon_gates.Exp11Gate):\n return self._exp_z_duration\n if isinstance(g, xmon_gates.ExpWGate):\n return self._exp_w_duration\n if isinstance(g, xmon_gates.XmonMeasurementGate):\n return self._measurement_duration\n if isinstance(g, xmon_gates.ExpZGate):\n # Z gates are performed in the control software.\n return Duration()\n raise ValueError('Unsupported gate type: {!r}'.format(operation))\n\n def validate_gate(self, gate: ops.Gate):\n \"\"\"Raises an error if the given gate isn't allowed.\n\n Raises:\n ValueError: Unsupported gate.\n \"\"\"\n if not isinstance(gate, (xmon_gates.Exp11Gate,\n xmon_gates.ExpWGate,\n xmon_gates.XmonMeasurementGate,\n xmon_gates.ExpZGate)):\n raise ValueError('Unsupported gate type: {!r}'.format(gate))\n\n def validate_operation(self, operation: ops.Operation):\n if not isinstance(operation, ops.GateOperation):\n raise ValueError('Unsupported operation: {!r}'.format(operation))\n\n self.validate_gate(operation.gate)\n\n for q in operation.qubits:\n if not isinstance(q, GridQubit):\n raise 
ValueError('Unsupported qubit type: {!r}'.format(q))\n if q not in self.qubits:\n raise ValueError('Qubit not on device: {!r}'.format(q))\n\n if (len(operation.qubits) == 2\n and not isinstance(operation.gate,\n xmon_gates.XmonMeasurementGate)):\n p, q = operation.qubits\n if not cast(GridQubit, p).is_adjacent(q):\n raise ValueError(\n 'Non-local interaction: {!r}.'.format(operation))\n\n def _check_if_exp11_operation_interacts_with_any(\n self,\n exp11_op: ops.GateOperation,\n others: Iterable[ops.GateOperation]) -> bool:\n return any(self._check_if_exp11_operation_interacts(exp11_op, op)\n for op in others)\n\n def _check_if_exp11_operation_interacts(\n self,\n exp11_op: ops.GateOperation,\n other_op: ops.GateOperation) -> bool:\n if isinstance(other_op.gate, xmon_gates.ExpZGate):\n return False\n if isinstance(other_op.gate, xmon_gates.ExpWGate):\n return False\n\n return any(cast(GridQubit, q).is_adjacent(cast(GridQubit, p))\n for q in exp11_op.qubits\n for p in other_op.qubits)\n\n def validate_scheduled_operation(self, schedule, scheduled_operation):\n self.validate_operation(scheduled_operation.operation)\n\n if isinstance(scheduled_operation.operation.gate,\n xmon_gates.Exp11Gate):\n for other in schedule.operations_happening_at_same_time_as(\n scheduled_operation):\n if self._check_if_exp11_operation_interacts(\n cast(ops.GateOperation, scheduled_operation.operation),\n cast(ops.GateOperation, other.operation)):\n raise ValueError(\n 'Adjacent Exp11 operations: {} vs {}.'.format(\n scheduled_operation, other))\n\n def validate_circuit(self, circuit: circuits.Circuit):\n super().validate_circuit(circuit)\n _verify_unique_measurement_keys(circuit.all_operations())\n\n def validate_moment(self, moment: circuits.Moment):\n super().validate_moment(moment)\n for op in moment.operations:\n if (isinstance(op, ops.GateOperation) and\n isinstance(op.gate, xmon_gates.Exp11Gate)):\n for other in moment.operations:\n if (other is not op and\n self._check_if_exp11_operation_interacts(\n cast(ops.GateOperation, op),\n cast(ops.GateOperation, other))):\n raise ValueError(\n 'Adjacent Exp11 operations: {}.'.format(moment))\n\n def can_add_operation_into_moment(self,\n operation: ops.Operation,\n moment: circuits.Moment) -> bool:\n self.validate_moment(moment)\n\n if not super().can_add_operation_into_moment(operation, moment):\n return False\n if (isinstance(operation, ops.GateOperation) and\n isinstance(operation.gate, xmon_gates.Exp11Gate)):\n return not self._check_if_exp11_operation_interacts_with_any(\n cast(ops.GateOperation, operation),\n cast(Iterable[ops.GateOperation], moment.operations))\n return True\n\n def validate_schedule(self, schedule):\n _verify_unique_measurement_keys(\n s.operation for s in schedule.scheduled_operations)\n for scheduled_operation in schedule.scheduled_operations:\n self.validate_scheduled_operation(schedule, scheduled_operation)\n\n def at(self, row: int, col: int) -> Optional[GridQubit]:\n \"\"\"Returns the qubit at the given position, if there is one, else None.\n \"\"\"\n q = GridQubit(row, col)\n return q if q in self.qubits else None\n\n def row(self, row: int) -> List[GridQubit]:\n \"\"\"Returns the qubits in the given row, in ascending order.\"\"\"\n return sorted(q for q in self.qubits if q.row == row)\n\n def col(self, col: int) -> List[GridQubit]:\n \"\"\"Returns the qubits in the given column, in ascending order.\"\"\"\n return sorted(q for q in self.qubits if q.col == col)\n\n def __repr__(self):\n return ('XmonDevice(measurement_duration={!r}, '\n 
'exp_w_duration={!r}, '\n 'exp_11_duration={!r} '\n 'qubits={!r})').format(self._measurement_duration,\n self._exp_w_duration,\n self._exp_z_duration,\n sorted(self.qubits))\n\n def __str__(self):\n diagram = TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.col, q.row, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.col, q.row, q2.col, q2.row)\n\n return diagram.render(\n horizontal_spacing=3,\n vertical_spacing=2,\n use_unicode_characters=True)\n\n def __eq__(self, other):\n if not isinstance(other, (XmonDevice, type(self))):\n return NotImplemented\n return (self._measurement_duration == other._measurement_duration and\n self._exp_w_duration == other._exp_w_duration and\n self._exp_z_duration == other._exp_z_duration and\n self.qubits == other.qubits)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((XmonDevice, self._measurement_duration,\n self._exp_w_duration, self._exp_z_duration, self.qubits))\n\n\ndef _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):\n seen = set() # type: Set[str]\n for op in operations:\n if ops.MeasurementGate.is_measurement(op):\n key = cast(ops.MeasurementGate,\n cast(ops.GateOperation, op).gate).key\n if key in seen:\n raise ValueError('Measurement key {} repeated'.format(key))\n seen.add(key)\n", "path": "cirq/google/xmon_device.py"}]}
| 3,069 | 184 |
gh_patches_debug_29162
|
rasdani/github-patches
|
git_diff
|
techmatters__terraso-backend-103
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove /auth/user endpoint
<!--
Use a concise title that describes the problem. Include key words from error messages.
Bad: Problem with newsletter form
Good: Newsletter form missing email address field
Bad: Issue with website
Good: Footer missing from homepage
-->
## Description
Clients in general should use the GraphQL interface to gather and manipulate data; right now this endpoint conflicts with that. It was created to provide the initial authenticated user data, but with the data already carried by the JWT token it is no longer needed.
--- END ISSUE ---
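If the access token already carries the user's identity, a client can recover the same data the endpoint used to return. A minimal sketch with PyJWT follows; the claim names and the signing algorithm are assumptions, since the real values depend on what `JWTService` issues.

```python
# Minimal sketch (not Terraso's actual client code) of reading the initial
# user data straight from the JWT access token instead of calling /auth/user.
# The claim names ("sub", "email") and RS256 are assumptions -- the real
# values are whatever JWTService puts into the tokens it issues.
import jwt  # PyJWT


def user_from_access_token(access_token: str, verifying_key: str) -> dict:
    payload = jwt.decode(access_token, verifying_key, algorithms=["RS256"])
    return {"id": payload["sub"], "email": payload.get("email")}
```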
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/auth/views.py`
Content:
```
1 import json
2
3 from django.conf import settings
4 from django.contrib.auth import get_user_model
5 from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
6 from django.views import View
7
8 from .mixins import AuthenticationRequiredMixin
9 from .providers import AppleProvider, GoogleProvider
10 from .services import AccountService, JWTService
11
12 User = get_user_model()
13
14
15 class AbstractAuthorizeView(View):
16 def get(self, request, *args, **kwargs):
17 return JsonResponse({"request_url": self.provider.login_url()})
18
19 @property
20 def provider(self):
21 return NotImplementedError("AbstractAuthorizeView must be inherited")
22
23
24 class GoogleAuthorizeView(AbstractAuthorizeView):
25 @property
26 def provider(self):
27 return GoogleProvider
28
29
30 class AppleAuthorizeView(AbstractAuthorizeView):
31 @property
32 def provider(self):
33 return AppleProvider
34
35
36 class AbstractCallbackView(View):
37 def get(self, request, *args, **kwargs):
38 self.authorization_code = self.request.GET.get("code")
39 self.error = self.request.GET.get("error")
40
41 return self.process_callback()
42
43 def post(self, request, *args, **kwargs):
44 self.authorization_code = self.request.POST.get("code")
45 self.error = self.request.POST.get("error")
46
47 return self.process_callback()
48
49 def process_callback(self):
50 if self.error:
51 return HttpResponse(f"Error: {self.error}", status=400)
52
53 if not self.authorization_code:
54 return HttpResponse("Error: no authorization code informed", status=400)
55
56 jwt_service = JWTService()
57
58 try:
59 user = self.process_signup()
60 access_token = jwt_service.create_access_token(user)
61 refresh_token = jwt_service.create_refresh_token(user)
62 except Exception as exc:
63 return HttpResponse(f"Error: {exc}", status=400)
64
65 response = HttpResponseRedirect(settings.WEB_CLIENT_URL)
66 response.set_cookie("atoken", access_token, domain=settings.AUTH_COOKIE_DOMAIN)
67 response.set_cookie("rtoken", refresh_token, domain=settings.AUTH_COOKIE_DOMAIN)
68
69 return response
70
71 def process_signup(self):
72 raise NotImplementedError("AbstractCallbackView must be inherited.")
73
74
75 class GoogleCallbackView(AbstractCallbackView):
76 def process_signup(self):
77 return AccountService().sign_up_with_google(self.authorization_code)
78
79
80 class AppleCallbackView(AbstractCallbackView):
81 def process_signup(self):
82 try:
83 apple_user_data = json.loads(self.request.POST.get("user", "{}"))
84 except json.JSONDecodeError:
85 raise Exception("couldn't parse User data from Apple")
86
87 first_name = apple_user_data.get("name", {}).get("firstName", "")
88 last_name = apple_user_data.get("name", {}).get("lastName", "")
89
90 return AccountService().sign_up_with_apple(
91 self.authorization_code, first_name=first_name, last_name=last_name
92 )
93
94
95 class RefreshAccessTokenView(View):
96 def post(self, request, *args, **kwargs):
97 try:
98 request_data = json.loads(request.body)
99 except json.decoder.JSONDecodeError:
100 return JsonResponse({"error": "The request expects a json body"}, status=400)
101
102 try:
103 refresh_token = request_data["refresh_token"]
104 except KeyError:
105 return JsonResponse(
106 {"error": "The request expects a 'refresh_token' parameter"}, status=400
107 )
108
109 jwt_service = JWTService()
110
111 try:
112 refresh_payload = jwt_service.verify_token(refresh_token)
113 except Exception as exc:
114 return JsonResponse({"error": str(exc)}, status=400)
115
116 try:
117 user = User.objects.get(id=refresh_payload["sub"])
118 except User.DoesNotExist:
119 return JsonResponse({"error": "User not found"}, status=400)
120
121 if not user.is_active:
122 return JsonResponse({"error": "User not found"}, status=400)
123
124 access_token = jwt_service.create_access_token(user)
125 refresh_token = jwt_service.create_refresh_token(user)
126
127 return JsonResponse(
128 {
129 "access_token": access_token,
130 "refresh_token": refresh_token,
131 }
132 )
133
134
135 class CheckUserView(AuthenticationRequiredMixin, View):
136 def get(self, request, *args, **kwargs):
137 user = request.user
138 return JsonResponse(
139 {
140 "user": {
141 "email": user.email,
142 "first_name": user.first_name,
143 "last_name": user.last_name,
144 "profile_image": user.profile_image,
145 }
146 }
147 )
148
```
Path: `terraso_backend/apps/auth/urls.py`
Content:
```
1 from django.urls import path
2 from django.views.decorators.csrf import csrf_exempt
3
4 from apps.auth.views import (
5 AppleAuthorizeView,
6 AppleCallbackView,
7 CheckUserView,
8 GoogleAuthorizeView,
9 GoogleCallbackView,
10 RefreshAccessTokenView,
11 )
12
13 app_name = "apps.auth"
14
15 urlpatterns = [
16 path("apple/authorize", csrf_exempt(AppleAuthorizeView.as_view()), name="apple-authorize"),
17 path(
18 "apple/callback",
19 csrf_exempt(AppleCallbackView.as_view()),
20 name="apple-callback",
21 ),
22 path("google/authorize", csrf_exempt(GoogleAuthorizeView.as_view()), name="google-authorize"),
23 path(
24 "google/callback",
25 csrf_exempt(GoogleCallbackView.as_view()),
26 name="google-callback",
27 ),
28 path("tokens", csrf_exempt(RefreshAccessTokenView.as_view()), name="tokens"),
29 path("user", csrf_exempt(CheckUserView.as_view()), name="user"),
30 ]
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/terraso_backend/apps/auth/urls.py b/terraso_backend/apps/auth/urls.py
--- a/terraso_backend/apps/auth/urls.py
+++ b/terraso_backend/apps/auth/urls.py
@@ -4,7 +4,6 @@
from apps.auth.views import (
AppleAuthorizeView,
AppleCallbackView,
- CheckUserView,
GoogleAuthorizeView,
GoogleCallbackView,
RefreshAccessTokenView,
@@ -26,5 +25,4 @@
name="google-callback",
),
path("tokens", csrf_exempt(RefreshAccessTokenView.as_view()), name="tokens"),
- path("user", csrf_exempt(CheckUserView.as_view()), name="user"),
]
diff --git a/terraso_backend/apps/auth/views.py b/terraso_backend/apps/auth/views.py
--- a/terraso_backend/apps/auth/views.py
+++ b/terraso_backend/apps/auth/views.py
@@ -5,7 +5,6 @@
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views import View
-from .mixins import AuthenticationRequiredMixin
from .providers import AppleProvider, GoogleProvider
from .services import AccountService, JWTService
@@ -130,18 +129,3 @@
"refresh_token": refresh_token,
}
)
-
-
-class CheckUserView(AuthenticationRequiredMixin, View):
- def get(self, request, *args, **kwargs):
- user = request.user
- return JsonResponse(
- {
- "user": {
- "email": user.email,
- "first_name": user.first_name,
- "last_name": user.last_name,
- "profile_image": user.profile_image,
- }
- }
- )
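A regression test could pin this removal down. The sketch below assumes a pytest-django setup and guesses the URL namespace from `app_name` in `urls.py`; adjust it to the project's actual URL includes.

```python
# Hedged regression-test sketch (assumes pytest-django); the "apps.auth"
# namespace is taken from app_name above and may differ in the real project.
import pytest
from django.urls import NoReverseMatch, reverse


def test_auth_user_route_is_gone():
    with pytest.raises(NoReverseMatch):
        reverse("apps.auth:user")
```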
|
{"golden_diff": "diff --git a/terraso_backend/apps/auth/urls.py b/terraso_backend/apps/auth/urls.py\n--- a/terraso_backend/apps/auth/urls.py\n+++ b/terraso_backend/apps/auth/urls.py\n@@ -4,7 +4,6 @@\n from apps.auth.views import (\n AppleAuthorizeView,\n AppleCallbackView,\n- CheckUserView,\n GoogleAuthorizeView,\n GoogleCallbackView,\n RefreshAccessTokenView,\n@@ -26,5 +25,4 @@\n name=\"google-callback\",\n ),\n path(\"tokens\", csrf_exempt(RefreshAccessTokenView.as_view()), name=\"tokens\"),\n- path(\"user\", csrf_exempt(CheckUserView.as_view()), name=\"user\"),\n ]\ndiff --git a/terraso_backend/apps/auth/views.py b/terraso_backend/apps/auth/views.py\n--- a/terraso_backend/apps/auth/views.py\n+++ b/terraso_backend/apps/auth/views.py\n@@ -5,7 +5,6 @@\n from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n from django.views import View\n \n-from .mixins import AuthenticationRequiredMixin\n from .providers import AppleProvider, GoogleProvider\n from .services import AccountService, JWTService\n \n@@ -130,18 +129,3 @@\n \"refresh_token\": refresh_token,\n }\n )\n-\n-\n-class CheckUserView(AuthenticationRequiredMixin, View):\n- def get(self, request, *args, **kwargs):\n- user = request.user\n- return JsonResponse(\n- {\n- \"user\": {\n- \"email\": user.email,\n- \"first_name\": user.first_name,\n- \"last_name\": user.last_name,\n- \"profile_image\": user.profile_image,\n- }\n- }\n- )\n", "issue": "Remove /auth/user endpoint\n<!--\r\nUse a concise title that describes the problem. Include key words from error messages.\r\nBad: Problem with newsletter form\r\nGood: Newsletter form missing email address field\r\n\r\nBad: Issue with website\r\nGood: Footer missing from homepage\r\n-->\r\n\r\n## Description\r\nClients in general should use the graph QL interface to gather and manipulate data, right now this enpoint is conflicting with that. 
It was created to provide the initial authenticated user data, but using the data provided by the JWT token this is not needed anymore.\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views import View\n\nfrom .mixins import AuthenticationRequiredMixin\nfrom .providers import AppleProvider, GoogleProvider\nfrom .services import AccountService, JWTService\n\nUser = get_user_model()\n\n\nclass AbstractAuthorizeView(View):\n def get(self, request, *args, **kwargs):\n return JsonResponse({\"request_url\": self.provider.login_url()})\n\n @property\n def provider(self):\n return NotImplementedError(\"AbstractAuthorizeView must be inherited\")\n\n\nclass GoogleAuthorizeView(AbstractAuthorizeView):\n @property\n def provider(self):\n return GoogleProvider\n\n\nclass AppleAuthorizeView(AbstractAuthorizeView):\n @property\n def provider(self):\n return AppleProvider\n\n\nclass AbstractCallbackView(View):\n def get(self, request, *args, **kwargs):\n self.authorization_code = self.request.GET.get(\"code\")\n self.error = self.request.GET.get(\"error\")\n\n return self.process_callback()\n\n def post(self, request, *args, **kwargs):\n self.authorization_code = self.request.POST.get(\"code\")\n self.error = self.request.POST.get(\"error\")\n\n return self.process_callback()\n\n def process_callback(self):\n if self.error:\n return HttpResponse(f\"Error: {self.error}\", status=400)\n\n if not self.authorization_code:\n return HttpResponse(\"Error: no authorization code informed\", status=400)\n\n jwt_service = JWTService()\n\n try:\n user = self.process_signup()\n access_token = jwt_service.create_access_token(user)\n refresh_token = jwt_service.create_refresh_token(user)\n except Exception as exc:\n return HttpResponse(f\"Error: {exc}\", status=400)\n\n response = HttpResponseRedirect(settings.WEB_CLIENT_URL)\n response.set_cookie(\"atoken\", access_token, domain=settings.AUTH_COOKIE_DOMAIN)\n response.set_cookie(\"rtoken\", refresh_token, domain=settings.AUTH_COOKIE_DOMAIN)\n\n return response\n\n def process_signup(self):\n raise NotImplementedError(\"AbstractCallbackView must be inherited.\")\n\n\nclass GoogleCallbackView(AbstractCallbackView):\n def process_signup(self):\n return AccountService().sign_up_with_google(self.authorization_code)\n\n\nclass AppleCallbackView(AbstractCallbackView):\n def process_signup(self):\n try:\n apple_user_data = json.loads(self.request.POST.get(\"user\", \"{}\"))\n except json.JSONDecodeError:\n raise Exception(\"couldn't parse User data from Apple\")\n\n first_name = apple_user_data.get(\"name\", {}).get(\"firstName\", \"\")\n last_name = apple_user_data.get(\"name\", {}).get(\"lastName\", \"\")\n\n return AccountService().sign_up_with_apple(\n self.authorization_code, first_name=first_name, last_name=last_name\n )\n\n\nclass RefreshAccessTokenView(View):\n def post(self, request, *args, **kwargs):\n try:\n request_data = json.loads(request.body)\n except json.decoder.JSONDecodeError:\n return JsonResponse({\"error\": \"The request expects a json body\"}, status=400)\n\n try:\n refresh_token = request_data[\"refresh_token\"]\n except KeyError:\n return JsonResponse(\n {\"error\": \"The request expects a 'refresh_token' parameter\"}, status=400\n )\n\n jwt_service = JWTService()\n\n try:\n refresh_payload = jwt_service.verify_token(refresh_token)\n except Exception as exc:\n return 
JsonResponse({\"error\": str(exc)}, status=400)\n\n try:\n user = User.objects.get(id=refresh_payload[\"sub\"])\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found\"}, status=400)\n\n if not user.is_active:\n return JsonResponse({\"error\": \"User not found\"}, status=400)\n\n access_token = jwt_service.create_access_token(user)\n refresh_token = jwt_service.create_refresh_token(user)\n\n return JsonResponse(\n {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n }\n )\n\n\nclass CheckUserView(AuthenticationRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n user = request.user\n return JsonResponse(\n {\n \"user\": {\n \"email\": user.email,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"profile_image\": user.profile_image,\n }\n }\n )\n", "path": "terraso_backend/apps/auth/views.py"}, {"content": "from django.urls import path\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom apps.auth.views import (\n AppleAuthorizeView,\n AppleCallbackView,\n CheckUserView,\n GoogleAuthorizeView,\n GoogleCallbackView,\n RefreshAccessTokenView,\n)\n\napp_name = \"apps.auth\"\n\nurlpatterns = [\n path(\"apple/authorize\", csrf_exempt(AppleAuthorizeView.as_view()), name=\"apple-authorize\"),\n path(\n \"apple/callback\",\n csrf_exempt(AppleCallbackView.as_view()),\n name=\"apple-callback\",\n ),\n path(\"google/authorize\", csrf_exempt(GoogleAuthorizeView.as_view()), name=\"google-authorize\"),\n path(\n \"google/callback\",\n csrf_exempt(GoogleCallbackView.as_view()),\n name=\"google-callback\",\n ),\n path(\"tokens\", csrf_exempt(RefreshAccessTokenView.as_view()), name=\"tokens\"),\n path(\"user\", csrf_exempt(CheckUserView.as_view()), name=\"user\"),\n]\n", "path": "terraso_backend/apps/auth/urls.py"}], "after_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views import View\n\nfrom .providers import AppleProvider, GoogleProvider\nfrom .services import AccountService, JWTService\n\nUser = get_user_model()\n\n\nclass AbstractAuthorizeView(View):\n def get(self, request, *args, **kwargs):\n return JsonResponse({\"request_url\": self.provider.login_url()})\n\n @property\n def provider(self):\n return NotImplementedError(\"AbstractAuthorizeView must be inherited\")\n\n\nclass GoogleAuthorizeView(AbstractAuthorizeView):\n @property\n def provider(self):\n return GoogleProvider\n\n\nclass AppleAuthorizeView(AbstractAuthorizeView):\n @property\n def provider(self):\n return AppleProvider\n\n\nclass AbstractCallbackView(View):\n def get(self, request, *args, **kwargs):\n self.authorization_code = self.request.GET.get(\"code\")\n self.error = self.request.GET.get(\"error\")\n\n return self.process_callback()\n\n def post(self, request, *args, **kwargs):\n self.authorization_code = self.request.POST.get(\"code\")\n self.error = self.request.POST.get(\"error\")\n\n return self.process_callback()\n\n def process_callback(self):\n if self.error:\n return HttpResponse(f\"Error: {self.error}\", status=400)\n\n if not self.authorization_code:\n return HttpResponse(\"Error: no authorization code informed\", status=400)\n\n jwt_service = JWTService()\n\n try:\n user = self.process_signup()\n access_token = jwt_service.create_access_token(user)\n refresh_token = jwt_service.create_refresh_token(user)\n except Exception as exc:\n return HttpResponse(f\"Error: {exc}\", 
status=400)\n\n response = HttpResponseRedirect(settings.WEB_CLIENT_URL)\n response.set_cookie(\"atoken\", access_token, domain=settings.AUTH_COOKIE_DOMAIN)\n response.set_cookie(\"rtoken\", refresh_token, domain=settings.AUTH_COOKIE_DOMAIN)\n\n return response\n\n def process_signup(self):\n raise NotImplementedError(\"AbstractCallbackView must be inherited.\")\n\n\nclass GoogleCallbackView(AbstractCallbackView):\n def process_signup(self):\n return AccountService().sign_up_with_google(self.authorization_code)\n\n\nclass AppleCallbackView(AbstractCallbackView):\n def process_signup(self):\n try:\n apple_user_data = json.loads(self.request.POST.get(\"user\", \"{}\"))\n except json.JSONDecodeError:\n raise Exception(\"couldn't parse User data from Apple\")\n\n first_name = apple_user_data.get(\"name\", {}).get(\"firstName\", \"\")\n last_name = apple_user_data.get(\"name\", {}).get(\"lastName\", \"\")\n\n return AccountService().sign_up_with_apple(\n self.authorization_code, first_name=first_name, last_name=last_name\n )\n\n\nclass RefreshAccessTokenView(View):\n def post(self, request, *args, **kwargs):\n try:\n request_data = json.loads(request.body)\n except json.decoder.JSONDecodeError:\n return JsonResponse({\"error\": \"The request expects a json body\"}, status=400)\n\n try:\n refresh_token = request_data[\"refresh_token\"]\n except KeyError:\n return JsonResponse(\n {\"error\": \"The request expects a 'refresh_token' parameter\"}, status=400\n )\n\n jwt_service = JWTService()\n\n try:\n refresh_payload = jwt_service.verify_token(refresh_token)\n except Exception as exc:\n return JsonResponse({\"error\": str(exc)}, status=400)\n\n try:\n user = User.objects.get(id=refresh_payload[\"sub\"])\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found\"}, status=400)\n\n if not user.is_active:\n return JsonResponse({\"error\": \"User not found\"}, status=400)\n\n access_token = jwt_service.create_access_token(user)\n refresh_token = jwt_service.create_refresh_token(user)\n\n return JsonResponse(\n {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n }\n )\n", "path": "terraso_backend/apps/auth/views.py"}, {"content": "from django.urls import path\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom apps.auth.views import (\n AppleAuthorizeView,\n AppleCallbackView,\n GoogleAuthorizeView,\n GoogleCallbackView,\n RefreshAccessTokenView,\n)\n\napp_name = \"apps.auth\"\n\nurlpatterns = [\n path(\"apple/authorize\", csrf_exempt(AppleAuthorizeView.as_view()), name=\"apple-authorize\"),\n path(\n \"apple/callback\",\n csrf_exempt(AppleCallbackView.as_view()),\n name=\"apple-callback\",\n ),\n path(\"google/authorize\", csrf_exempt(GoogleAuthorizeView.as_view()), name=\"google-authorize\"),\n path(\n \"google/callback\",\n csrf_exempt(GoogleCallbackView.as_view()),\n name=\"google-callback\",\n ),\n path(\"tokens\", csrf_exempt(RefreshAccessTokenView.as_view()), name=\"tokens\"),\n]\n", "path": "terraso_backend/apps/auth/urls.py"}]}
| 1,930 | 381 |
gh_patches_debug_24716
|
rasdani/github-patches
|
git_diff
|
encode__httpx-3109
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.27.0: replacing `app=app` with `transport=ASGITransport(app)` doesn't type check
```diff
@pytest_asyncio.fixture
async def deprecated_client(deprecated_db_manager: MainDbManager) -> AsyncGenerator[AsyncClient, None]:
async with _make_mocked_test_app(db_manager=deprecated_db_manager, authorization=False) as app:
- async with AsyncClient(app=app, base_url=URL, headers=USER_AGENT) as ac:
+ async with AsyncClient(transport=ASGITransport(app), base_url=URL, headers=USER_AGENT) as ac:
yield ac
```
```
% poetry run mypy .
tests/conftest.py:64: error: Argument 1 to "ASGITransport" has incompatible type "FastAPI"; expected "Callable[[dict[str, Any], Callable[[], Awaitable[dict[str, Any]]], Callable[[dict[str, Any]], Coroutine[None, None, None]]], Coroutine[None, None, None]]" [arg-type]
tests/conftest.py:64: note: "FastAPI.__call__" has type "Callable[[Arg(MutableMapping[str, Any], 'scope'), Arg(Callable[[], Awaitable[MutableMapping[str, Any]]], 'receive'), Arg(Callable[[MutableMapping[str, Any]], Awaitable[None]], 'send')], Coroutine[Any, Any, None]]"
Found 1 error in 1 file (checked 472 source files)
It looks like FastAPI and httpx disagree on the shape of an ASGI application. How am I meant to resolve this?
--- END ISSUE ---
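The mismatch is narrower than it looks: only the nested `send` callable is at fault. The sketch below reproduces the error without FastAPI or httpx (the `receive` argument is omitted for brevity).

```python
# Self-contained repro of the mismatch (no FastAPI or httpx needed). The clash
# is in the nested `send` callable: an ASGI app declares that it may pass any
# MutableMapping message to `send`, while the old httpx alias only promises a
# `send` that accepts a plain Dict. Callable parameters are contravariant, so
# Callable[[Dict], ...] is not a subtype of Callable[[MutableMapping], ...].
from typing import Any, Awaitable, Callable, Coroutine, Dict, MutableMapping

OldSend = Callable[[Dict[str, Any]], Coroutine[None, None, None]]
OldApp = Callable[[Dict[str, Any], OldSend], Coroutine[None, None, None]]


async def asgi_style_app(
    scope: MutableMapping[str, Any],
    send: Callable[[MutableMapping[str, Any]], Awaitable[None]],
) -> None:
    await send({"type": "http.response.start", "status": 200})


app: OldApp = asgi_style_app  # mypy: incompatible types in assignment
```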
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_transports/asgi.py`
Content:
```
1 from __future__ import annotations
2
3 import typing
4
5 import sniffio
6
7 from .._models import Request, Response
8 from .._types import AsyncByteStream
9 from .base import AsyncBaseTransport
10
11 if typing.TYPE_CHECKING: # pragma: no cover
12 import asyncio
13
14 import trio
15
16 Event = typing.Union[asyncio.Event, trio.Event]
17
18
19 _Message = typing.Dict[str, typing.Any]
20 _Receive = typing.Callable[[], typing.Awaitable[_Message]]
21 _Send = typing.Callable[
22 [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]
23 ]
24 _ASGIApp = typing.Callable[
25 [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]
26 ]
27
28 __all__ = ["ASGITransport"]
29
30
31 def create_event() -> Event:
32 if sniffio.current_async_library() == "trio":
33 import trio
34
35 return trio.Event()
36 else:
37 import asyncio
38
39 return asyncio.Event()
40
41
42 class ASGIResponseStream(AsyncByteStream):
43 def __init__(self, body: list[bytes]) -> None:
44 self._body = body
45
46 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
47 yield b"".join(self._body)
48
49
50 class ASGITransport(AsyncBaseTransport):
51 """
52 A custom AsyncTransport that handles sending requests directly to an ASGI app.
53 The simplest way to use this functionality is to use the `app` argument.
54
55 ```
56 client = httpx.AsyncClient(app=app)
57 ```
58
59 Alternatively, you can setup the transport instance explicitly.
60 This allows you to include any additional configuration arguments specific
61 to the ASGITransport class:
62
63 ```
64 transport = httpx.ASGITransport(
65 app=app,
66 root_path="/submount",
67 client=("1.2.3.4", 123)
68 )
69 client = httpx.AsyncClient(transport=transport)
70 ```
71
72 Arguments:
73
74 * `app` - The ASGI application.
75 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
76 should be raised. Default to `True`. Can be set to `False` for use cases
77 such as testing the content of a client 500 response.
78 * `root_path` - The root path on which the ASGI application should be mounted.
79 * `client` - A two-tuple indicating the client IP and port of incoming requests.
80 ```
81 """
82
83 def __init__(
84 self,
85 app: _ASGIApp,
86 raise_app_exceptions: bool = True,
87 root_path: str = "",
88 client: tuple[str, int] = ("127.0.0.1", 123),
89 ) -> None:
90 self.app = app
91 self.raise_app_exceptions = raise_app_exceptions
92 self.root_path = root_path
93 self.client = client
94
95 async def handle_async_request(
96 self,
97 request: Request,
98 ) -> Response:
99 assert isinstance(request.stream, AsyncByteStream)
100
101 # ASGI scope.
102 scope = {
103 "type": "http",
104 "asgi": {"version": "3.0"},
105 "http_version": "1.1",
106 "method": request.method,
107 "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
108 "scheme": request.url.scheme,
109 "path": request.url.path,
110 "raw_path": request.url.raw_path.split(b"?")[0],
111 "query_string": request.url.query,
112 "server": (request.url.host, request.url.port),
113 "client": self.client,
114 "root_path": self.root_path,
115 }
116
117 # Request.
118 request_body_chunks = request.stream.__aiter__()
119 request_complete = False
120
121 # Response.
122 status_code = None
123 response_headers = None
124 body_parts = []
125 response_started = False
126 response_complete = create_event()
127
128 # ASGI callables.
129
130 async def receive() -> dict[str, typing.Any]:
131 nonlocal request_complete
132
133 if request_complete:
134 await response_complete.wait()
135 return {"type": "http.disconnect"}
136
137 try:
138 body = await request_body_chunks.__anext__()
139 except StopAsyncIteration:
140 request_complete = True
141 return {"type": "http.request", "body": b"", "more_body": False}
142 return {"type": "http.request", "body": body, "more_body": True}
143
144 async def send(message: dict[str, typing.Any]) -> None:
145 nonlocal status_code, response_headers, response_started
146
147 if message["type"] == "http.response.start":
148 assert not response_started
149
150 status_code = message["status"]
151 response_headers = message.get("headers", [])
152 response_started = True
153
154 elif message["type"] == "http.response.body":
155 assert not response_complete.is_set()
156 body = message.get("body", b"")
157 more_body = message.get("more_body", False)
158
159 if body and request.method != "HEAD":
160 body_parts.append(body)
161
162 if not more_body:
163 response_complete.set()
164
165 try:
166 await self.app(scope, receive, send)
167 except Exception: # noqa: PIE-786
168 if self.raise_app_exceptions:
169 raise
170
171 response_complete.set()
172 if status_code is None:
173 status_code = 500
174 if response_headers is None:
175 response_headers = {}
176
177 assert response_complete.is_set()
178 assert status_code is not None
179 assert response_headers is not None
180
181 stream = ASGIResponseStream(body_parts)
182
183 return Response(status_code, headers=response_headers, stream=stream)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py
--- a/httpx/_transports/asgi.py
+++ b/httpx/_transports/asgi.py
@@ -16,13 +16,13 @@
Event = typing.Union[asyncio.Event, trio.Event]
-_Message = typing.Dict[str, typing.Any]
+_Message = typing.MutableMapping[str, typing.Any]
_Receive = typing.Callable[[], typing.Awaitable[_Message]]
_Send = typing.Callable[
- [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]
+ [typing.MutableMapping[str, typing.Any]], typing.Awaitable[None]
]
_ASGIApp = typing.Callable[
- [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]
+ [typing.MutableMapping[str, typing.Any], _Receive, _Send], typing.Awaitable[None]
]
__all__ = ["ASGITransport"]
@@ -141,7 +141,7 @@
return {"type": "http.request", "body": b"", "more_body": False}
return {"type": "http.request", "body": body, "more_body": True}
- async def send(message: dict[str, typing.Any]) -> None:
+ async def send(message: typing.MutableMapping[str, typing.Any]) -> None:
nonlocal status_code, response_headers, response_started
if message["type"] == "http.response.start":
|
{"golden_diff": "diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py\n--- a/httpx/_transports/asgi.py\n+++ b/httpx/_transports/asgi.py\n@@ -16,13 +16,13 @@\n Event = typing.Union[asyncio.Event, trio.Event]\n \n \n-_Message = typing.Dict[str, typing.Any]\n+_Message = typing.MutableMapping[str, typing.Any]\n _Receive = typing.Callable[[], typing.Awaitable[_Message]]\n _Send = typing.Callable[\n- [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]\n+ [typing.MutableMapping[str, typing.Any]], typing.Awaitable[None]\n ]\n _ASGIApp = typing.Callable[\n- [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]\n+ [typing.MutableMapping[str, typing.Any], _Receive, _Send], typing.Awaitable[None]\n ]\n \n __all__ = [\"ASGITransport\"]\n@@ -141,7 +141,7 @@\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n \n- async def send(message: dict[str, typing.Any]) -> None:\n+ async def send(message: typing.MutableMapping[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n \n if message[\"type\"] == \"http.response.start\":\n", "issue": "0.27.0: replacing `app=app` with `transport=ASGITransport(app)` doesn't type check\n```diff\r\n @pytest_asyncio.fixture\r\n async def deprecated_client(deprecated_db_manager: MainDbManager) -> AsyncGenerator[AsyncClient, None]:\r\n async with _make_mocked_test_app(db_manager=deprecated_db_manager, authorization=False) as app:\r\n- async with AsyncClient(app=app, base_url=URL, headers=USER_AGENT) as ac:\r\n+ async with AsyncClient(transport=ASGITransport(app), base_url=URL, headers=USER_AGENT) as ac:\r\n yield ac\r\n```\r\n\r\n```\r\n% poetry run mypy . \r\ntests/conftest.py:64: error: Argument 1 to \"ASGITransport\" has incompatible type \"FastAPI\"; expected \"Callable[[dict[str, Any], Callable[[], Awaitable[dict[str, Any]]], Callable[[dict[str, Any]], Coroutine[None, None, None]]], Coroutine[None, None, None]]\" [arg-type]\r\ntests/conftest.py:64: note: \"FastAPI.__call__\" has type \"Callable[[Arg(MutableMapping[str, Any], 'scope'), Arg(Callable[[], Awaitable[MutableMapping[str, Any]]], 'receive'), Arg(Callable[[MutableMapping[str, Any]], Awaitable[None]], 'send')], Coroutine[Any, Any, None]]\"\r\nFound 1 error in 1 file (checked 472 source files)\r\n \r\n It looks like FastAPI and httpx disagree on the shape of an ASGI application. 
How am I meant to resolve this?\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing\n\nimport sniffio\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.Dict[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]\n]\n_ASGIApp = typing.Callable[\n [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]\n]\n\n__all__ = [\"ASGITransport\"]\n\n\ndef create_event() -> Event:\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: list[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. 
Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": request.method,\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n \"raw_path\": request.url.raw_path.split(b\"?\")[0],\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: dict[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and request.method != \"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n", "path": "httpx/_transports/asgi.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing\n\nimport sniffio\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.MutableMapping[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.MutableMapping[str, typing.Any]], 
typing.Awaitable[None]\n]\n_ASGIApp = typing.Callable[\n [typing.MutableMapping[str, typing.Any], _Receive, _Send], typing.Awaitable[None]\n]\n\n__all__ = [\"ASGITransport\"]\n\n\ndef create_event() -> Event:\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: list[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": request.method,\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n \"raw_path\": request.url.raw_path.split(b\"?\")[0],\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: typing.MutableMapping[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n 
elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and request.method != \"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n", "path": "httpx/_transports/asgi.py"}]}
| 2,314 | 339 |
gh_patches_debug_39882
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Promote people to superuser in Django admin
Replace the current `is_staff` checkbox with a `is_superuser` checkbox. This way, the project manager can give superuser access (access to Django admin interface) to people without touching the command line.
Project managers can access the Cantus editing functionalities (within the Cantus DB website, not the Admin interface).
Superusers can access the Django admin interface.
If we want someone to have access to both, they need to be in the `project manager` group and have `is_superuser` as `True`.
--- END ISSUE ---
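The access model described above boils down to two independent checks; note that stock Django additionally requires `is_staff=True` to log in to the admin, which is presumably why that flag stays on the form. A sketch with hypothetical helper names:

```python
# Sketch of the access model from the issue (helper names are made up):
# "project manager" group -> Cantus editing views, is_superuser -> Django admin.
def can_edit_cantus(user) -> bool:
    return user.is_authenticated and user.groups.filter(name="project manager").exists()


def can_use_django_admin(user) -> bool:
    return user.is_authenticated and user.is_superuser


def has_full_access(user) -> bool:
    # Needs both: the group for Cantus editing *and* the superuser flag.
    return can_edit_cantus(user) and can_use_django_admin(user)
```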
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/admin.py`
Content:
```
1 from django.contrib import admin
2 from main_app.models import *
3 # Register your models here.
4 admin.site.register(Chant)
5 admin.site.register(Feast)
6 admin.site.register(Genre)
7 admin.site.register(Indexer)
8 admin.site.register(Notation)
9 admin.site.register(Office)
10 admin.site.register(Provenance)
11 admin.site.register(Segment)
12 admin.site.register(Sequence)
13 admin.site.register(Source)
14
```
Path: `django/cantusdb_project/users/admin.py`
Content:
```
1 from django.contrib import admin
2 from .models import *
3 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
4 from django.contrib.auth.forms import UserCreationForm, UserChangeForm
5
6 # Register your models here.
7
8 class CustomUserCreationForm(UserCreationForm):
9
10 class Meta:
11 model = User
12 fields = (
13 'email', 'password', 'is_active', 'date_joined', 'last_login',
14 'full_name', 'first_name', 'last_name', 'institution', 'city',
15 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',
16 )
17
18
19 class CustomUserChangeForm(UserChangeForm):
20
21 class Meta:
22 model = User
23 fields = (
24 'email', 'password', 'is_active', 'date_joined', 'last_login',
25 'full_name', 'first_name', 'last_name', 'institution', 'city',
26 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',
27 )
28
29
30 class UserAdmin(BaseUserAdmin):
31 form = CustomUserChangeForm
32 add_form = CustomUserCreationForm
33 list_display = ('email', 'first_name', 'last_name', 'institution',)
34 list_filter = ('groups',)
35 fieldsets = (
36 ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),
37 ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),
38 ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),
39 )
40 add_fieldsets = (
41 ('Account info', {'fields': ('email', ('password1', 'password2'),)}),
42 ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),
43 ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),
44 )
45 search_fields = ('email', 'first_name', 'last_name', 'institution',)
46 ordering = ('email',)
47 filter_horizontal = ('groups', 'sources_user_can_edit',)
48
49 admin.site.register(User, UserAdmin)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py
--- a/django/cantusdb_project/main_app/admin.py
+++ b/django/cantusdb_project/main_app/admin.py
@@ -1,6 +1,15 @@
from django.contrib import admin
from main_app.models import *
+from django.contrib.auth import get_user_model
# Register your models here.
+
+class SourcesUserCanEditInline(admin.TabularInline):
+ model = get_user_model().sources_user_can_edit.through
+
+class SourceAdmin(admin.ModelAdmin):
+ inlines = [SourcesUserCanEditInline]
+ filter_horizontal = ('century', 'notation', 'current_editors', 'inventoried_by', 'full_text_entered_by', 'melodies_entered_by', 'proofreaders', 'other_editors')
+
admin.site.register(Chant)
admin.site.register(Feast)
admin.site.register(Genre)
@@ -10,4 +19,4 @@
admin.site.register(Provenance)
admin.site.register(Segment)
admin.site.register(Sequence)
-admin.site.register(Source)
+admin.site.register(Source, SourceAdmin)
diff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py
--- a/django/cantusdb_project/users/admin.py
+++ b/django/cantusdb_project/users/admin.py
@@ -5,42 +5,19 @@
# Register your models here.
-class CustomUserCreationForm(UserCreationForm):
-
- class Meta:
- model = User
- fields = (
- 'email', 'password', 'is_active', 'date_joined', 'last_login',
- 'full_name', 'first_name', 'last_name', 'institution', 'city',
- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',
- )
-
-
-class CustomUserChangeForm(UserChangeForm):
-
- class Meta:
- model = User
- fields = (
- 'email', 'password', 'is_active', 'date_joined', 'last_login',
- 'full_name', 'first_name', 'last_name', 'institution', 'city',
- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',
- )
-
-
class UserAdmin(BaseUserAdmin):
- form = CustomUserChangeForm
- add_form = CustomUserCreationForm
+ readonly_fields = ('date_joined', 'last_login',)
list_display = ('email', 'first_name', 'last_name', 'institution',)
list_filter = ('groups',)
fieldsets = (
('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),
('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),
- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),
+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),
)
add_fieldsets = (
('Account info', {'fields': ('email', ('password1', 'password2'),)}),
('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),
- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),
+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),
)
search_fields = ('email', 'first_name', 'last_name', 'institution',)
ordering = ('email',)
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -1,6 +1,15 @@\n from django.contrib import admin\n from main_app.models import *\n+from django.contrib.auth import get_user_model\n # Register your models here.\n+\n+class SourcesUserCanEditInline(admin.TabularInline):\n+ model = get_user_model().sources_user_can_edit.through\n+\n+class SourceAdmin(admin.ModelAdmin):\n+ inlines = [SourcesUserCanEditInline]\n+ filter_horizontal = ('century', 'notation', 'current_editors', 'inventoried_by', 'full_text_entered_by', 'melodies_entered_by', 'proofreaders', 'other_editors')\n+\n admin.site.register(Chant)\n admin.site.register(Feast)\n admin.site.register(Genre)\n@@ -10,4 +19,4 @@\n admin.site.register(Provenance)\n admin.site.register(Segment)\n admin.site.register(Sequence)\n-admin.site.register(Source)\n+admin.site.register(Source, SourceAdmin)\ndiff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py\n--- a/django/cantusdb_project/users/admin.py\n+++ b/django/cantusdb_project/users/admin.py\n@@ -5,42 +5,19 @@\n \n # Register your models here.\n \n-class CustomUserCreationForm(UserCreationForm):\n-\n- class Meta:\n- model = User\n- fields = (\n- 'email', 'password', 'is_active', 'date_joined', 'last_login', \n- 'full_name', 'first_name', 'last_name', 'institution', 'city', \n- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n- )\n-\n-\n-class CustomUserChangeForm(UserChangeForm):\n-\n- class Meta:\n- model = User\n- fields = (\n- 'email', 'password', 'is_active', 'date_joined', 'last_login', \n- 'full_name', 'first_name', 'last_name', 'institution', 'city', \n- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n- )\n-\n-\n class UserAdmin(BaseUserAdmin):\n- form = CustomUserChangeForm\n- add_form = CustomUserCreationForm \n+ readonly_fields = ('date_joined', 'last_login',)\n list_display = ('email', 'first_name', 'last_name', 'institution',)\n list_filter = ('groups',)\n fieldsets = (\n ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n add_fieldsets = (\n ('Account info', {'fields': ('email', ('password1', 'password2'),)}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n search_fields = ('email', 'first_name', 'last_name', 'institution',)\n ordering = ('email',)\n", "issue": "Promote people to superuser in Django admin\nReplace the current `is_staff` checkbox with a `is_superuser` checkbox. This way, the project manager can give superuser access (access to Django admin interface) to people without touching the command line. \r\n\r\nProject managers can access the Cantus editing functionalities (within the Cantus DB website, not the Admin interface). \r\nSuperusers can access the Django admin interface. 
\r\nIf we want someone to have access to both, they need to be in the `project manager` group and have `is_superuser` as `True`.\n", "before_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\n# Register your models here.\nadmin.site.register(Chant)\nadmin.site.register(Feast)\nadmin.site.register(Genre)\nadmin.site.register(Indexer)\nadmin.site.register(Notation)\nadmin.site.register(Office)\nadmin.site.register(Provenance)\nadmin.site.register(Segment)\nadmin.site.register(Sequence)\nadmin.site.register(Source)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\n# Register your models here.\n\nclass CustomUserCreationForm(UserCreationForm):\n\n class Meta:\n model = User\n fields = (\n 'email', 'password', 'is_active', 'date_joined', 'last_login', \n 'full_name', 'first_name', 'last_name', 'institution', 'city', \n 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n )\n\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = User\n fields = (\n 'email', 'password', 'is_active', 'date_joined', 'last_login', \n 'full_name', 'first_name', 'last_name', 'institution', 'city', \n 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n )\n\n\nclass UserAdmin(BaseUserAdmin):\n form = CustomUserChangeForm\n add_form = CustomUserCreationForm \n list_display = ('email', 'first_name', 'last_name', 'institution',)\n list_filter = ('groups',)\n fieldsets = (\n ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n )\n add_fieldsets = (\n ('Account info', {'fields': ('email', ('password1', 'password2'),)}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n )\n search_fields = ('email', 'first_name', 'last_name', 'institution',)\n ordering = ('email',)\n filter_horizontal = ('groups', 'sources_user_can_edit',)\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\nfrom django.contrib.auth import get_user_model\n# Register your models here.\n\nclass SourcesUserCanEditInline(admin.TabularInline):\n model = get_user_model().sources_user_can_edit.through\n\nclass SourceAdmin(admin.ModelAdmin):\n inlines = [SourcesUserCanEditInline]\n filter_horizontal = ('century', 'notation', 'current_editors', 'inventoried_by', 'full_text_entered_by', 'melodies_entered_by', 'proofreaders', 'other_editors')\n\nadmin.site.register(Chant)\nadmin.site.register(Feast)\nadmin.site.register(Genre)\nadmin.site.register(Indexer)\nadmin.site.register(Notation)\nadmin.site.register(Office)\nadmin.site.register(Provenance)\nadmin.site.register(Segment)\nadmin.site.register(Sequence)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as 
BaseUserAdmin\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\n# Register your models here.\n\nclass UserAdmin(BaseUserAdmin):\n readonly_fields = ('date_joined', 'last_login',)\n list_display = ('email', 'first_name', 'last_name', 'institution',)\n list_filter = ('groups',)\n fieldsets = (\n ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n add_fieldsets = (\n ('Account info', {'fields': ('email', ('password1', 'password2'),)}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n search_fields = ('email', 'first_name', 'last_name', 'institution',)\n ordering = ('email',)\n filter_horizontal = ('groups', 'sources_user_can_edit',)\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}]}
| 1,087 | 842 |
gh_patches_debug_39641
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-6275
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/rule_state.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, Dict, List, Optional
4
5 import great_expectations.exceptions as ge_exceptions
6 from great_expectations.rule_based_profiler.domain import Domain
7 from great_expectations.rule_based_profiler.parameter_container import (
8 ParameterContainer,
9 )
10
11 if TYPE_CHECKING:
12 from great_expectations.rule_based_profiler.rule.rule import Rule
13
14
15 class RuleState:
16 """
17 RuleState maintains state information, resulting from executing "Rule.run()" method by combining passed "Batch" data
18 with currently loaded configuration of "Rule" components ("DomainBuilder" object, "ParameterBuilder" objects, and
19 "ExpectationConfigurationBuilder" objects). Using "RuleState" with correponding flags is sufficient for generating
20 outputs for different purposes (in raw and aggregated form) from available "Domain" objects and computed parameters.
21 """
22
23 def __init__(
24 self,
25 rule: Optional[Rule] = None,
26 variables: Optional[ParameterContainer] = None,
27 domains: Optional[List[Domain]] = None,
28 parameters: Optional[Dict[str, ParameterContainer]] = None,
29 ) -> None:
30 """
31 Args:
32 rule: Rule object for which present RuleState object corresponds (needed for various Rule properties).
33 variables: attribute name/value pairs (part of state, relevant for associated Rule).
34 domains: List of Domain objects, which DomainBuilder of associated Rule generated.
35 parameters: Dictionary of ParameterContainer objects corresponding to all Domain objects in memory.
36 """
37 self._rule = rule
38
39 self._variables = variables
40
41 if domains is None:
42 domains = []
43
44 self._domains = domains
45
46 if parameters is None:
47 parameters = {}
48
49 self._parameters = parameters
50
51 self._rule_domain_builder_execution_time = 0.0
52 self._rule_execution_time = 0.0
53
54 @property
55 def rule(self) -> Optional[Rule]:
56 return self._rule
57
58 @rule.setter
59 def rule(self, value: Rule) -> None:
60 self._rule = value
61
62 @property
63 def variables(self) -> Optional[ParameterContainer]:
64 return self._variables
65
66 @variables.setter
67 def variables(self, value: Optional[ParameterContainer]) -> None:
68 self._variables = value
69
70 @property
71 def domains(self) -> List[Domain]:
72 return self._domains
73
74 @domains.setter
75 def domains(self, value: Optional[List[Domain]]) -> None:
76 self._domains = value
77
78 @property
79 def parameters(self) -> Dict[str, ParameterContainer]:
80 return self._parameters
81
82 @parameters.setter
83 def parameters(self, value: Optional[Dict[str, ParameterContainer]]) -> None:
84 self._parameters = value
85
86 @property
87 def rule_domain_builder_execution_time(self) -> float:
88 return self._rule_domain_builder_execution_time
89
90 @rule_domain_builder_execution_time.setter
91 def rule_domain_builder_execution_time(self, value: float) -> None:
92 self._rule_domain_builder_execution_time = value
93
94 @property
95 def rule_execution_time(self) -> float:
96 return self._rule_execution_time
97
98 @rule_execution_time.setter
99 def rule_execution_time(self, value: float) -> None:
100 self._rule_execution_time = value
101
102 def reset(self) -> None:
103 self.reset_domains()
104 self.reset_parameter_containers()
105
106 def reset_domains(self) -> None:
107 self.domains = []
108
109 def reset_parameter_containers(self) -> None:
110 self.parameters = {}
111
112 def add_domain(
113 self,
114 domain: Domain,
115 allow_duplicates: bool = False,
116 ) -> None:
117 domain_cursor: Domain
118 if not allow_duplicates and domain.id in [
119 domain_cursor.id for domain_cursor in self.domains
120 ]:
121 raise ge_exceptions.ProfilerConfigurationError(
122 f"""Error: Domain\n{domain}\nalready exists. In order to add it, either pass "allow_duplicates=True" \
123 or call "RuleState.remove_domain_if_exists()" with Domain having ID equal to "{domain.id}" as argument first.
124 """
125 )
126 self.domains.append(domain)
127
128 def remove_domain_if_exists(self, domain: Domain) -> None:
129 domain_cursor: Domain
130 if domain.id in [domain_cursor.id for domain_cursor in self.domains]:
131 self.domains.remove(domain)
132 self.remove_domain_if_exists(domain=domain)
133
134 def get_domains_as_dict(self) -> Dict[str, Domain]:
135 domain: Domain
136 return {domain.id: domain for domain in self.domains}
137
138 def initialize_parameter_container_for_domain(
139 self,
140 domain: Domain,
141 overwrite: bool = True,
142 ) -> None:
143 if not overwrite and domain.id in self.parameters:
144 raise ge_exceptions.ProfilerConfigurationError(
145 f"""Error: ParameterContainer for Domain\n{domain}\nalready exists. In order to overwrite it, either \
146 pass "overwrite=True" or call "RuleState.remove_parameter_container_from_domain()" with Domain having ID equal to \
147 "{domain.id}" as argument first.
148 """
149 )
150
151 parameter_container = ParameterContainer(parameter_nodes=None)
152 self._parameters[domain.id] = parameter_container
153
154 def remove_parameter_container_from_domain_if_exists(self, domain: Domain) -> None:
155 self.parameters.pop(domain.id, None)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/rule_based_profiler/rule_state.py b/great_expectations/rule_based_profiler/rule_state.py
--- a/great_expectations/rule_based_profiler/rule_state.py
+++ b/great_expectations/rule_based_profiler/rule_state.py
@@ -23,26 +23,26 @@
def __init__(
self,
rule: Optional[Rule] = None,
- variables: Optional[ParameterContainer] = None,
domains: Optional[List[Domain]] = None,
+ variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> None:
"""
Args:
rule: Rule object for which present RuleState object corresponds (needed for various Rule properties).
- variables: attribute name/value pairs (part of state, relevant for associated Rule).
domains: List of Domain objects, which DomainBuilder of associated Rule generated.
+ variables: attribute name/value pairs (part of state, relevant for associated Rule).
parameters: Dictionary of ParameterContainer objects corresponding to all Domain objects in memory.
"""
self._rule = rule
- self._variables = variables
-
if domains is None:
domains = []
self._domains = domains
+ self._variables = variables
+
if parameters is None:
parameters = {}
@@ -59,14 +59,6 @@
def rule(self, value: Rule) -> None:
self._rule = value
- @property
- def variables(self) -> Optional[ParameterContainer]:
- return self._variables
-
- @variables.setter
- def variables(self, value: Optional[ParameterContainer]) -> None:
- self._variables = value
-
@property
def domains(self) -> List[Domain]:
return self._domains
@@ -75,6 +67,14 @@
def domains(self, value: Optional[List[Domain]]) -> None:
self._domains = value
+ @property
+ def variables(self) -> Optional[ParameterContainer]:
+ return self._variables
+
+ @variables.setter
+ def variables(self, value: Optional[ParameterContainer]) -> None:
+ self._variables = value
+
@property
def parameters(self) -> Dict[str, ParameterContainer]:
return self._parameters
@@ -123,6 +123,7 @@
or call "RuleState.remove_domain_if_exists()" with Domain having ID equal to "{domain.id}" as argument first.
"""
)
+
self.domains.append(domain)
def remove_domain_if_exists(self, domain: Domain) -> None:
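
The patch above swaps the positional order of `domains` and `variables` in `RuleState.__init__` and regroups the matching properties. As a minimal, self-contained sketch (the `Domain`, `ParameterContainer`, and `RuleState` classes below are cut-down stand-ins, not the real great_expectations imports), callers that already pass keyword arguments keep working unchanged across such a reordering:

```python
from typing import Dict, List, Optional


class Domain:
    """Stand-in for great_expectations' Domain (illustrative only)."""

    def __init__(self, domain_id: str) -> None:
        self.id = domain_id


class ParameterContainer:
    """Stand-in for great_expectations' ParameterContainer (illustrative only)."""

    def __init__(self, parameter_nodes=None) -> None:
        self.parameter_nodes = parameter_nodes


class RuleState:
    # Signature after the patch: domains now precedes variables.
    def __init__(
        self,
        rule=None,
        domains: Optional[List[Domain]] = None,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> None:
        self._rule = rule
        self._domains = domains if domains is not None else []
        self._variables = variables
        self._parameters = parameters if parameters is not None else {}


# Callers that use keyword arguments are unaffected by the positional swap.
state = RuleState(
    variables=ParameterContainer(parameter_nodes=None),
    domains=[Domain("column/passenger_count")],
)
print(len(state._domains))  # 1
```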
|
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/rule_state.py b/great_expectations/rule_based_profiler/rule_state.py\n--- a/great_expectations/rule_based_profiler/rule_state.py\n+++ b/great_expectations/rule_based_profiler/rule_state.py\n@@ -23,26 +23,26 @@\n def __init__(\n self,\n rule: Optional[Rule] = None,\n- variables: Optional[ParameterContainer] = None,\n domains: Optional[List[Domain]] = None,\n+ variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n ) -> None:\n \"\"\"\n Args:\n rule: Rule object for which present RuleState object corresponds (needed for various Rule properties).\n- variables: attribute name/value pairs (part of state, relevant for associated Rule).\n domains: List of Domain objects, which DomainBuilder of associated Rule generated.\n+ variables: attribute name/value pairs (part of state, relevant for associated Rule).\n parameters: Dictionary of ParameterContainer objects corresponding to all Domain objects in memory.\n \"\"\"\n self._rule = rule\n \n- self._variables = variables\n-\n if domains is None:\n domains = []\n \n self._domains = domains\n \n+ self._variables = variables\n+\n if parameters is None:\n parameters = {}\n \n@@ -59,14 +59,6 @@\n def rule(self, value: Rule) -> None:\n self._rule = value\n \n- @property\n- def variables(self) -> Optional[ParameterContainer]:\n- return self._variables\n-\n- @variables.setter\n- def variables(self, value: Optional[ParameterContainer]) -> None:\n- self._variables = value\n-\n @property\n def domains(self) -> List[Domain]:\n return self._domains\n@@ -75,6 +67,14 @@\n def domains(self, value: Optional[List[Domain]]) -> None:\n self._domains = value\n \n+ @property\n+ def variables(self) -> Optional[ParameterContainer]:\n+ return self._variables\n+\n+ @variables.setter\n+ def variables(self, value: Optional[ParameterContainer]) -> None:\n+ self._variables = value\n+\n @property\n def parameters(self) -> Dict[str, ParameterContainer]:\n return self._parameters\n@@ -123,6 +123,7 @@\n or call \"RuleState.remove_domain_if_exists()\" with Domain having ID equal to \"{domain.id}\" as argument first.\n \"\"\"\n )\n+\n self.domains.append(domain)\n \n def remove_domain_if_exists(self, domain: Domain) -> None:\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Dict, List, Optional\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.parameter_container import (\n ParameterContainer,\n)\n\nif TYPE_CHECKING:\n from great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass RuleState:\n \"\"\"\n RuleState maintains state information, resulting from executing \"Rule.run()\" method by combining passed \"Batch\" data\n with currently loaded configuration of \"Rule\" components (\"DomainBuilder\" object, \"ParameterBuilder\" objects, and\n \"ExpectationConfigurationBuilder\" objects). 
Using \"RuleState\" with correponding flags is sufficient for generating\n outputs for different purposes (in raw and aggregated form) from available \"Domain\" objects and computed parameters.\n \"\"\"\n\n def __init__(\n self,\n rule: Optional[Rule] = None,\n variables: Optional[ParameterContainer] = None,\n domains: Optional[List[Domain]] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n ) -> None:\n \"\"\"\n Args:\n rule: Rule object for which present RuleState object corresponds (needed for various Rule properties).\n variables: attribute name/value pairs (part of state, relevant for associated Rule).\n domains: List of Domain objects, which DomainBuilder of associated Rule generated.\n parameters: Dictionary of ParameterContainer objects corresponding to all Domain objects in memory.\n \"\"\"\n self._rule = rule\n\n self._variables = variables\n\n if domains is None:\n domains = []\n\n self._domains = domains\n\n if parameters is None:\n parameters = {}\n\n self._parameters = parameters\n\n self._rule_domain_builder_execution_time = 0.0\n self._rule_execution_time = 0.0\n\n @property\n def rule(self) -> Optional[Rule]:\n return self._rule\n\n @rule.setter\n def rule(self, value: Rule) -> None:\n self._rule = value\n\n @property\n def variables(self) -> Optional[ParameterContainer]:\n return self._variables\n\n @variables.setter\n def variables(self, value: Optional[ParameterContainer]) -> None:\n self._variables = value\n\n @property\n def domains(self) -> List[Domain]:\n return self._domains\n\n @domains.setter\n def domains(self, value: Optional[List[Domain]]) -> None:\n self._domains = value\n\n @property\n def parameters(self) -> Dict[str, ParameterContainer]:\n return self._parameters\n\n @parameters.setter\n def parameters(self, value: Optional[Dict[str, ParameterContainer]]) -> None:\n self._parameters = value\n\n @property\n def rule_domain_builder_execution_time(self) -> float:\n return self._rule_domain_builder_execution_time\n\n @rule_domain_builder_execution_time.setter\n def rule_domain_builder_execution_time(self, value: float) -> None:\n self._rule_domain_builder_execution_time = value\n\n @property\n def rule_execution_time(self) -> float:\n return self._rule_execution_time\n\n @rule_execution_time.setter\n def rule_execution_time(self, value: float) -> None:\n self._rule_execution_time = value\n\n def reset(self) -> None:\n self.reset_domains()\n self.reset_parameter_containers()\n\n def reset_domains(self) -> None:\n self.domains = []\n\n def reset_parameter_containers(self) -> None:\n self.parameters = {}\n\n def add_domain(\n self,\n domain: Domain,\n allow_duplicates: bool = False,\n ) -> None:\n domain_cursor: Domain\n if not allow_duplicates and domain.id in [\n domain_cursor.id for domain_cursor in self.domains\n ]:\n raise ge_exceptions.ProfilerConfigurationError(\n f\"\"\"Error: Domain\\n{domain}\\nalready exists. 
In order to add it, either pass \"allow_duplicates=True\" \\\nor call \"RuleState.remove_domain_if_exists()\" with Domain having ID equal to \"{domain.id}\" as argument first.\n\"\"\"\n )\n self.domains.append(domain)\n\n def remove_domain_if_exists(self, domain: Domain) -> None:\n domain_cursor: Domain\n if domain.id in [domain_cursor.id for domain_cursor in self.domains]:\n self.domains.remove(domain)\n self.remove_domain_if_exists(domain=domain)\n\n def get_domains_as_dict(self) -> Dict[str, Domain]:\n domain: Domain\n return {domain.id: domain for domain in self.domains}\n\n def initialize_parameter_container_for_domain(\n self,\n domain: Domain,\n overwrite: bool = True,\n ) -> None:\n if not overwrite and domain.id in self.parameters:\n raise ge_exceptions.ProfilerConfigurationError(\n f\"\"\"Error: ParameterContainer for Domain\\n{domain}\\nalready exists. In order to overwrite it, either \\\npass \"overwrite=True\" or call \"RuleState.remove_parameter_container_from_domain()\" with Domain having ID equal to \\\n\"{domain.id}\" as argument first.\n\"\"\"\n )\n\n parameter_container = ParameterContainer(parameter_nodes=None)\n self._parameters[domain.id] = parameter_container\n\n def remove_parameter_container_from_domain_if_exists(self, domain: Domain) -> None:\n self.parameters.pop(domain.id, None)\n", "path": "great_expectations/rule_based_profiler/rule_state.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Dict, List, Optional\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.parameter_container import (\n ParameterContainer,\n)\n\nif TYPE_CHECKING:\n from great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass RuleState:\n \"\"\"\n RuleState maintains state information, resulting from executing \"Rule.run()\" method by combining passed \"Batch\" data\n with currently loaded configuration of \"Rule\" components (\"DomainBuilder\" object, \"ParameterBuilder\" objects, and\n \"ExpectationConfigurationBuilder\" objects). 
Using \"RuleState\" with correponding flags is sufficient for generating\n outputs for different purposes (in raw and aggregated form) from available \"Domain\" objects and computed parameters.\n \"\"\"\n\n def __init__(\n self,\n rule: Optional[Rule] = None,\n domains: Optional[List[Domain]] = None,\n variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n ) -> None:\n \"\"\"\n Args:\n rule: Rule object for which present RuleState object corresponds (needed for various Rule properties).\n domains: List of Domain objects, which DomainBuilder of associated Rule generated.\n variables: attribute name/value pairs (part of state, relevant for associated Rule).\n parameters: Dictionary of ParameterContainer objects corresponding to all Domain objects in memory.\n \"\"\"\n self._rule = rule\n\n if domains is None:\n domains = []\n\n self._domains = domains\n\n self._variables = variables\n\n if parameters is None:\n parameters = {}\n\n self._parameters = parameters\n\n self._rule_domain_builder_execution_time = 0.0\n self._rule_execution_time = 0.0\n\n @property\n def rule(self) -> Optional[Rule]:\n return self._rule\n\n @rule.setter\n def rule(self, value: Rule) -> None:\n self._rule = value\n\n @property\n def domains(self) -> List[Domain]:\n return self._domains\n\n @domains.setter\n def domains(self, value: Optional[List[Domain]]) -> None:\n self._domains = value\n\n @property\n def variables(self) -> Optional[ParameterContainer]:\n return self._variables\n\n @variables.setter\n def variables(self, value: Optional[ParameterContainer]) -> None:\n self._variables = value\n\n @property\n def parameters(self) -> Dict[str, ParameterContainer]:\n return self._parameters\n\n @parameters.setter\n def parameters(self, value: Optional[Dict[str, ParameterContainer]]) -> None:\n self._parameters = value\n\n @property\n def rule_domain_builder_execution_time(self) -> float:\n return self._rule_domain_builder_execution_time\n\n @rule_domain_builder_execution_time.setter\n def rule_domain_builder_execution_time(self, value: float) -> None:\n self._rule_domain_builder_execution_time = value\n\n @property\n def rule_execution_time(self) -> float:\n return self._rule_execution_time\n\n @rule_execution_time.setter\n def rule_execution_time(self, value: float) -> None:\n self._rule_execution_time = value\n\n def reset(self) -> None:\n self.reset_domains()\n self.reset_parameter_containers()\n\n def reset_domains(self) -> None:\n self.domains = []\n\n def reset_parameter_containers(self) -> None:\n self.parameters = {}\n\n def add_domain(\n self,\n domain: Domain,\n allow_duplicates: bool = False,\n ) -> None:\n domain_cursor: Domain\n if not allow_duplicates and domain.id in [\n domain_cursor.id for domain_cursor in self.domains\n ]:\n raise ge_exceptions.ProfilerConfigurationError(\n f\"\"\"Error: Domain\\n{domain}\\nalready exists. 
In order to add it, either pass \"allow_duplicates=True\" \\\nor call \"RuleState.remove_domain_if_exists()\" with Domain having ID equal to \"{domain.id}\" as argument first.\n\"\"\"\n )\n\n self.domains.append(domain)\n\n def remove_domain_if_exists(self, domain: Domain) -> None:\n domain_cursor: Domain\n if domain.id in [domain_cursor.id for domain_cursor in self.domains]:\n self.domains.remove(domain)\n self.remove_domain_if_exists(domain=domain)\n\n def get_domains_as_dict(self) -> Dict[str, Domain]:\n domain: Domain\n return {domain.id: domain for domain in self.domains}\n\n def initialize_parameter_container_for_domain(\n self,\n domain: Domain,\n overwrite: bool = True,\n ) -> None:\n if not overwrite and domain.id in self.parameters:\n raise ge_exceptions.ProfilerConfigurationError(\n f\"\"\"Error: ParameterContainer for Domain\\n{domain}\\nalready exists. In order to overwrite it, either \\\npass \"overwrite=True\" or call \"RuleState.remove_parameter_container_from_domain()\" with Domain having ID equal to \\\n\"{domain.id}\" as argument first.\n\"\"\"\n )\n\n parameter_container = ParameterContainer(parameter_nodes=None)\n self._parameters[domain.id] = parameter_container\n\n def remove_parameter_container_from_domain_if_exists(self, domain: Domain) -> None:\n self.parameters.pop(domain.id, None)\n", "path": "great_expectations/rule_based_profiler/rule_state.py"}]}
| 1,807 | 584 |
gh_patches_debug_21942
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-220
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include build properties in gerrit summary callback
This allows a gerrit summary callback to do some more advanced filtering
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/steps/python.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16
17 from buildbot.status.results import SUCCESS, FAILURE, WARNINGS
18 from buildbot.steps.shell import ShellCommand
19 import re
20
21 try:
22 import cStringIO
23 StringIO = cStringIO.StringIO
24 except ImportError:
25 from StringIO import StringIO
26
27
28 class BuildEPYDoc(ShellCommand):
29 name = "epydoc"
30 command = ["make", "epydocs"]
31 description = ["building", "epydocs"]
32 descriptionDone = ["epydoc"]
33
34 def createSummary(self, log):
35 import_errors = 0
36 warnings = 0
37 errors = 0
38
39 for line in StringIO(log.getText()):
40 if line.startswith("Error importing "):
41 import_errors += 1
42 if line.find("Warning: ") != -1:
43 warnings += 1
44 if line.find("Error: ") != -1:
45 errors += 1
46
47 self.descriptionDone = self.descriptionDone[:]
48 if import_errors:
49 self.descriptionDone.append("ierr=%d" % import_errors)
50 if warnings:
51 self.descriptionDone.append("warn=%d" % warnings)
52 if errors:
53 self.descriptionDone.append("err=%d" % errors)
54
55 self.import_errors = import_errors
56 self.warnings = warnings
57 self.errors = errors
58
59 def evaluateCommand(self, cmd):
60 if cmd.rc != 0:
61 return FAILURE
62 if self.warnings or self.errors:
63 return WARNINGS
64 return SUCCESS
65
66
67 class PyFlakes(ShellCommand):
68 name = "pyflakes"
69 command = ["make", "pyflakes"]
70 description = ["running", "pyflakes"]
71 descriptionDone = ["pyflakes"]
72 flunkOnFailure = False
73 flunkingIssues = ["undefined"] # any pyflakes lines like this cause FAILURE
74
75 MESSAGES = ("unused", "undefined", "redefs", "import*", "misc")
76
77 def createSummary(self, log):
78 counts = {}
79 summaries = {}
80 for m in self.MESSAGES:
81 counts[m] = 0
82 summaries[m] = []
83
84 first = True
85 for line in StringIO(log.getText()).readlines():
86 # the first few lines might contain echoed commands from a 'make
87 # pyflakes' step, so don't count these as warnings. Stop ignoring
88 # the initial lines as soon as we see one with a colon.
89 if first:
90 if line.find(":") != -1:
91 # there's the colon, this is the first real line
92 first = False
93 # fall through and parse the line
94 else:
95 # skip this line, keep skipping non-colon lines
96 continue
97 if line.find("imported but unused") != -1:
98 m = "unused"
99 elif line.find("*' used; unable to detect undefined names") != -1:
100 m = "import*"
101 elif line.find("undefined name") != -1:
102 m = "undefined"
103 elif line.find("redefinition of unused") != -1:
104 m = "redefs"
105 else:
106 m = "misc"
107 summaries[m].append(line)
108 counts[m] += 1
109
110 self.descriptionDone = self.descriptionDone[:]
111 for m in self.MESSAGES:
112 if counts[m]:
113 self.descriptionDone.append("%s=%d" % (m, counts[m]))
114 self.addCompleteLog(m, "".join(summaries[m]))
115 self.setProperty("pyflakes-%s" % m, counts[m], "pyflakes")
116 self.setProperty("pyflakes-total", sum(counts.values()), "pyflakes")
117
118
119 def evaluateCommand(self, cmd):
120 if cmd.rc != 0:
121 return FAILURE
122 for m in self.flunkingIssues:
123 if self.getProperty("pyflakes-%s" % m):
124 return FAILURE
125 if self.getProperty("pyflakes-total"):
126 return WARNINGS
127 return SUCCESS
128
129 class PyLint(ShellCommand):
130 '''A command that knows about pylint output.
131 It's a good idea to add --output-format=parseable to your
132 command, since it includes the filename in the message.
133 '''
134 name = "pylint"
135 description = ["running", "pylint"]
136 descriptionDone = ["pylint"]
137
138 # Using the default text output, the message format is :
139 # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
140 # with --output-format=parseable it is: (the outer brackets are literal)
141 # FILE_NAME:LINE_NUM: [MESSAGE_TYPE[, OBJECT]] MESSAGE
142 # message type consists of the type char and 4 digits
143 # The message types:
144
145 MESSAGES = {
146 'C': "convention", # for programming standard violation
147 'R': "refactor", # for bad code smell
148 'W': "warning", # for python specific problems
149 'E': "error", # for much probably bugs in the code
150 'F': "fatal", # error prevented pylint from further processing.
151 'I': "info",
152 }
153
154 flunkingIssues = ["F", "E"] # msg categories that cause FAILURE
155
156 _re_groupname = 'errtype'
157 _msgtypes_re_str = '(?P<%s>[%s])' % (_re_groupname, ''.join(MESSAGES.keys()))
158 _default_line_re = re.compile(r'^%s: *\d+:.+' % _msgtypes_re_str)
159 _parseable_line_re = re.compile(r'[^:]+:\d+: \[%s[,\]] .+' % _msgtypes_re_str)
160
161 def createSummary(self, log):
162 counts = {}
163 summaries = {}
164 for m in self.MESSAGES:
165 counts[m] = 0
166 summaries[m] = []
167
168 line_re = None # decide after first match
169 for line in StringIO(log.getText()).readlines():
170 if not line_re:
171 # need to test both and then decide on one
172 if self._parseable_line_re.match(line):
173 line_re = self._parseable_line_re
174 elif self._default_line_re.match(line):
175 line_re = self._default_line_re
176 else: # no match yet
177 continue
178 mo = line_re.match(line)
179 if mo:
180 msgtype = mo.group(self._re_groupname)
181 assert msgtype in self.MESSAGES
182 summaries[msgtype].append(line)
183 counts[msgtype] += 1
184
185 self.descriptionDone = self.descriptionDone[:]
186 for msg, fullmsg in self.MESSAGES.items():
187 if counts[msg]:
188 self.descriptionDone.append("%s=%d" % (fullmsg, counts[msg]))
189 self.addCompleteLog(fullmsg, "".join(summaries[msg]))
190 self.setProperty("pylint-%s" % fullmsg, counts[msg])
191 self.setProperty("pylint-total", sum(counts.values()))
192
193 def evaluateCommand(self, cmd):
194 if cmd.rc != 0:
195 return FAILURE
196 for msg in self.flunkingIssues:
197 if self.getProperty("pylint-%s" % self.MESSAGES[msg]):
198 return FAILURE
199 if self.getProperty("pylint-total"):
200 return WARNINGS
201 return SUCCESS
202
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/steps/python.py b/master/buildbot/steps/python.py
--- a/master/buildbot/steps/python.py
+++ b/master/buildbot/steps/python.py
@@ -135,6 +135,17 @@
description = ["running", "pylint"]
descriptionDone = ["pylint"]
+ # pylint's return codes (see pylint(1) for details)
+ # 1 - 16 will be bit-ORed
+
+ RC_OK = 0
+ RC_FATAL = 1
+ RC_ERROR = 2
+ RC_WARNING = 4
+ RC_REFACTOR = 8
+ RC_CONVENTION = 16
+ RC_USAGE = 32
+
# Using the default text output, the message format is :
# MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE
# with --output-format=parseable it is: (the outer brackets are literal)
@@ -191,7 +202,7 @@
self.setProperty("pylint-total", sum(counts.values()))
def evaluateCommand(self, cmd):
- if cmd.rc != 0:
+ if cmd.rc & (self.RC_FATAL|self.RC_ERROR|self.RC_USAGE):
return FAILURE
for msg in self.flunkingIssues:
if self.getProperty("pylint-%s" % self.MESSAGES[msg]):
|
{"golden_diff": "diff --git a/master/buildbot/steps/python.py b/master/buildbot/steps/python.py\n--- a/master/buildbot/steps/python.py\n+++ b/master/buildbot/steps/python.py\n@@ -135,6 +135,17 @@\n description = [\"running\", \"pylint\"]\n descriptionDone = [\"pylint\"]\n \n+ # pylint's return codes (see pylint(1) for details)\n+ # 1 - 16 will be bit-ORed\n+\n+ RC_OK = 0\n+ RC_FATAL = 1\n+ RC_ERROR = 2\n+ RC_WARNING = 4\n+ RC_REFACTOR = 8\n+ RC_CONVENTION = 16\n+ RC_USAGE = 32\n+\n # Using the default text output, the message format is :\n # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE\n # with --output-format=parseable it is: (the outer brackets are literal)\n@@ -191,7 +202,7 @@\n self.setProperty(\"pylint-total\", sum(counts.values()))\n \n def evaluateCommand(self, cmd):\n- if cmd.rc != 0:\n+ if cmd.rc & (self.RC_FATAL|self.RC_ERROR|self.RC_USAGE):\n return FAILURE\n for msg in self.flunkingIssues:\n if self.getProperty(\"pylint-%s\" % self.MESSAGES[msg]):\n", "issue": "Include build properties in gerrit summary callback\nThis allows a gerrit summary callback to do some more advanced filtering\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\nfrom buildbot.status.results import SUCCESS, FAILURE, WARNINGS\nfrom buildbot.steps.shell import ShellCommand\nimport re\n\ntry:\n import cStringIO\n StringIO = cStringIO.StringIO\nexcept ImportError:\n from StringIO import StringIO\n\n\nclass BuildEPYDoc(ShellCommand):\n name = \"epydoc\"\n command = [\"make\", \"epydocs\"]\n description = [\"building\", \"epydocs\"]\n descriptionDone = [\"epydoc\"]\n\n def createSummary(self, log):\n import_errors = 0\n warnings = 0\n errors = 0\n\n for line in StringIO(log.getText()):\n if line.startswith(\"Error importing \"):\n import_errors += 1\n if line.find(\"Warning: \") != -1:\n warnings += 1\n if line.find(\"Error: \") != -1:\n errors += 1\n\n self.descriptionDone = self.descriptionDone[:]\n if import_errors:\n self.descriptionDone.append(\"ierr=%d\" % import_errors)\n if warnings:\n self.descriptionDone.append(\"warn=%d\" % warnings)\n if errors:\n self.descriptionDone.append(\"err=%d\" % errors)\n\n self.import_errors = import_errors\n self.warnings = warnings\n self.errors = errors\n\n def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n if self.warnings or self.errors:\n return WARNINGS\n return SUCCESS\n\n\nclass PyFlakes(ShellCommand):\n name = \"pyflakes\"\n command = [\"make\", \"pyflakes\"]\n description = [\"running\", \"pyflakes\"]\n descriptionDone = [\"pyflakes\"]\n flunkOnFailure = False\n flunkingIssues = [\"undefined\"] # any pyflakes lines like this cause FAILURE\n\n MESSAGES = (\"unused\", \"undefined\", \"redefs\", \"import*\", \"misc\")\n\n def createSummary(self, log):\n counts = {}\n summaries = {}\n for m in self.MESSAGES:\n counts[m] = 0\n summaries[m] = []\n\n first = 
True\n for line in StringIO(log.getText()).readlines():\n # the first few lines might contain echoed commands from a 'make\n # pyflakes' step, so don't count these as warnings. Stop ignoring\n # the initial lines as soon as we see one with a colon.\n if first:\n if line.find(\":\") != -1:\n # there's the colon, this is the first real line\n first = False\n # fall through and parse the line\n else:\n # skip this line, keep skipping non-colon lines\n continue\n if line.find(\"imported but unused\") != -1:\n m = \"unused\"\n elif line.find(\"*' used; unable to detect undefined names\") != -1:\n m = \"import*\"\n elif line.find(\"undefined name\") != -1:\n m = \"undefined\"\n elif line.find(\"redefinition of unused\") != -1:\n m = \"redefs\"\n else:\n m = \"misc\"\n summaries[m].append(line)\n counts[m] += 1\n\n self.descriptionDone = self.descriptionDone[:]\n for m in self.MESSAGES:\n if counts[m]:\n self.descriptionDone.append(\"%s=%d\" % (m, counts[m]))\n self.addCompleteLog(m, \"\".join(summaries[m]))\n self.setProperty(\"pyflakes-%s\" % m, counts[m], \"pyflakes\")\n self.setProperty(\"pyflakes-total\", sum(counts.values()), \"pyflakes\")\n\n\n def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n for m in self.flunkingIssues:\n if self.getProperty(\"pyflakes-%s\" % m):\n return FAILURE\n if self.getProperty(\"pyflakes-total\"):\n return WARNINGS\n return SUCCESS\n\nclass PyLint(ShellCommand):\n '''A command that knows about pylint output.\n It's a good idea to add --output-format=parseable to your\n command, since it includes the filename in the message.\n '''\n name = \"pylint\"\n description = [\"running\", \"pylint\"]\n descriptionDone = [\"pylint\"]\n\n # Using the default text output, the message format is :\n # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE\n # with --output-format=parseable it is: (the outer brackets are literal)\n # FILE_NAME:LINE_NUM: [MESSAGE_TYPE[, OBJECT]] MESSAGE\n # message type consists of the type char and 4 digits\n # The message types:\n\n MESSAGES = {\n 'C': \"convention\", # for programming standard violation\n 'R': \"refactor\", # for bad code smell\n 'W': \"warning\", # for python specific problems\n 'E': \"error\", # for much probably bugs in the code\n 'F': \"fatal\", # error prevented pylint from further processing.\n 'I': \"info\",\n }\n\n flunkingIssues = [\"F\", \"E\"] # msg categories that cause FAILURE\n\n _re_groupname = 'errtype'\n _msgtypes_re_str = '(?P<%s>[%s])' % (_re_groupname, ''.join(MESSAGES.keys()))\n _default_line_re = re.compile(r'^%s: *\\d+:.+' % _msgtypes_re_str)\n _parseable_line_re = re.compile(r'[^:]+:\\d+: \\[%s[,\\]] .+' % _msgtypes_re_str)\n\n def createSummary(self, log):\n counts = {}\n summaries = {}\n for m in self.MESSAGES:\n counts[m] = 0\n summaries[m] = []\n\n line_re = None # decide after first match\n for line in StringIO(log.getText()).readlines():\n if not line_re:\n # need to test both and then decide on one\n if self._parseable_line_re.match(line):\n line_re = self._parseable_line_re\n elif self._default_line_re.match(line):\n line_re = self._default_line_re\n else: # no match yet\n continue\n mo = line_re.match(line)\n if mo:\n msgtype = mo.group(self._re_groupname)\n assert msgtype in self.MESSAGES\n summaries[msgtype].append(line)\n counts[msgtype] += 1\n\n self.descriptionDone = self.descriptionDone[:]\n for msg, fullmsg in self.MESSAGES.items():\n if counts[msg]:\n self.descriptionDone.append(\"%s=%d\" % (fullmsg, counts[msg]))\n self.addCompleteLog(fullmsg, \"\".join(summaries[msg]))\n 
self.setProperty(\"pylint-%s\" % fullmsg, counts[msg])\n self.setProperty(\"pylint-total\", sum(counts.values()))\n\n def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n for msg in self.flunkingIssues:\n if self.getProperty(\"pylint-%s\" % self.MESSAGES[msg]):\n return FAILURE\n if self.getProperty(\"pylint-total\"):\n return WARNINGS\n return SUCCESS\n\n", "path": "master/buildbot/steps/python.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\nfrom buildbot.status.results import SUCCESS, FAILURE, WARNINGS\nfrom buildbot.steps.shell import ShellCommand\nimport re\n\ntry:\n import cStringIO\n StringIO = cStringIO.StringIO\nexcept ImportError:\n from StringIO import StringIO\n\n\nclass BuildEPYDoc(ShellCommand):\n name = \"epydoc\"\n command = [\"make\", \"epydocs\"]\n description = [\"building\", \"epydocs\"]\n descriptionDone = [\"epydoc\"]\n\n def createSummary(self, log):\n import_errors = 0\n warnings = 0\n errors = 0\n\n for line in StringIO(log.getText()):\n if line.startswith(\"Error importing \"):\n import_errors += 1\n if line.find(\"Warning: \") != -1:\n warnings += 1\n if line.find(\"Error: \") != -1:\n errors += 1\n\n self.descriptionDone = self.descriptionDone[:]\n if import_errors:\n self.descriptionDone.append(\"ierr=%d\" % import_errors)\n if warnings:\n self.descriptionDone.append(\"warn=%d\" % warnings)\n if errors:\n self.descriptionDone.append(\"err=%d\" % errors)\n\n self.import_errors = import_errors\n self.warnings = warnings\n self.errors = errors\n\n def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n if self.warnings or self.errors:\n return WARNINGS\n return SUCCESS\n\n\nclass PyFlakes(ShellCommand):\n name = \"pyflakes\"\n command = [\"make\", \"pyflakes\"]\n description = [\"running\", \"pyflakes\"]\n descriptionDone = [\"pyflakes\"]\n flunkOnFailure = False\n flunkingIssues = [\"undefined\"] # any pyflakes lines like this cause FAILURE\n\n MESSAGES = (\"unused\", \"undefined\", \"redefs\", \"import*\", \"misc\")\n\n def createSummary(self, log):\n counts = {}\n summaries = {}\n for m in self.MESSAGES:\n counts[m] = 0\n summaries[m] = []\n\n first = True\n for line in StringIO(log.getText()).readlines():\n # the first few lines might contain echoed commands from a 'make\n # pyflakes' step, so don't count these as warnings. 
Stop ignoring\n # the initial lines as soon as we see one with a colon.\n if first:\n if line.find(\":\") != -1:\n # there's the colon, this is the first real line\n first = False\n # fall through and parse the line\n else:\n # skip this line, keep skipping non-colon lines\n continue\n if line.find(\"imported but unused\") != -1:\n m = \"unused\"\n elif line.find(\"*' used; unable to detect undefined names\") != -1:\n m = \"import*\"\n elif line.find(\"undefined name\") != -1:\n m = \"undefined\"\n elif line.find(\"redefinition of unused\") != -1:\n m = \"redefs\"\n else:\n m = \"misc\"\n summaries[m].append(line)\n counts[m] += 1\n\n self.descriptionDone = self.descriptionDone[:]\n for m in self.MESSAGES:\n if counts[m]:\n self.descriptionDone.append(\"%s=%d\" % (m, counts[m]))\n self.addCompleteLog(m, \"\".join(summaries[m]))\n self.setProperty(\"pyflakes-%s\" % m, counts[m], \"pyflakes\")\n self.setProperty(\"pyflakes-total\", sum(counts.values()), \"pyflakes\")\n\n\n def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n for m in self.flunkingIssues:\n if self.getProperty(\"pyflakes-%s\" % m):\n return FAILURE\n if self.getProperty(\"pyflakes-total\"):\n return WARNINGS\n return SUCCESS\n\nclass PyLint(ShellCommand):\n '''A command that knows about pylint output.\n It's a good idea to add --output-format=parseable to your\n command, since it includes the filename in the message.\n '''\n name = \"pylint\"\n description = [\"running\", \"pylint\"]\n descriptionDone = [\"pylint\"]\n\n # pylint's return codes (see pylint(1) for details)\n # 1 - 16 will be bit-ORed\n\n RC_OK = 0\n RC_FATAL = 1\n RC_ERROR = 2\n RC_WARNING = 4\n RC_REFACTOR = 8\n RC_CONVENTION = 16\n RC_USAGE = 32\n\n # Using the default text output, the message format is :\n # MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE\n # with --output-format=parseable it is: (the outer brackets are literal)\n # FILE_NAME:LINE_NUM: [MESSAGE_TYPE[, OBJECT]] MESSAGE\n # message type consists of the type char and 4 digits\n # The message types:\n\n MESSAGES = {\n 'C': \"convention\", # for programming standard violation\n 'R': \"refactor\", # for bad code smell\n 'W': \"warning\", # for python specific problems\n 'E': \"error\", # for much probably bugs in the code\n 'F': \"fatal\", # error prevented pylint from further processing.\n 'I': \"info\",\n }\n\n flunkingIssues = [\"F\", \"E\"] # msg categories that cause FAILURE\n\n _re_groupname = 'errtype'\n _msgtypes_re_str = '(?P<%s>[%s])' % (_re_groupname, ''.join(MESSAGES.keys()))\n _default_line_re = re.compile(r'^%s: *\\d+:.+' % _msgtypes_re_str)\n _parseable_line_re = re.compile(r'[^:]+:\\d+: \\[%s[,\\]] .+' % _msgtypes_re_str)\n\n def createSummary(self, log):\n counts = {}\n summaries = {}\n for m in self.MESSAGES:\n counts[m] = 0\n summaries[m] = []\n\n line_re = None # decide after first match\n for line in StringIO(log.getText()).readlines():\n if not line_re:\n # need to test both and then decide on one\n if self._parseable_line_re.match(line):\n line_re = self._parseable_line_re\n elif self._default_line_re.match(line):\n line_re = self._default_line_re\n else: # no match yet\n continue\n mo = line_re.match(line)\n if mo:\n msgtype = mo.group(self._re_groupname)\n assert msgtype in self.MESSAGES\n summaries[msgtype].append(line)\n counts[msgtype] += 1\n\n self.descriptionDone = self.descriptionDone[:]\n for msg, fullmsg in self.MESSAGES.items():\n if counts[msg]:\n self.descriptionDone.append(\"%s=%d\" % (fullmsg, counts[msg]))\n self.addCompleteLog(fullmsg, 
\"\".join(summaries[msg]))\n self.setProperty(\"pylint-%s\" % fullmsg, counts[msg])\n self.setProperty(\"pylint-total\", sum(counts.values()))\n\n def evaluateCommand(self, cmd):\n if cmd.rc & (self.RC_FATAL|self.RC_ERROR|self.RC_USAGE):\n return FAILURE\n for msg in self.flunkingIssues:\n if self.getProperty(\"pylint-%s\" % self.MESSAGES[msg]):\n return FAILURE\n if self.getProperty(\"pylint-total\"):\n return WARNINGS\n return SUCCESS\n\n", "path": "master/buildbot/steps/python.py"}]}
| 2,504 | 311 |
gh_patches_debug_18354
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-34
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Manual env fails without helpful error when no benchmarks are loaded.
## 🐛 Bug
compiler_gym.bin.manual_env fails with stacktrace if no benchmarks are loaded.
## To Reproduce
```
> python -m compiler_gym.bin.manual_env --env=llvm-autophase-ic-v0 --benchmark=npb-v0/50
Initialized environment in 264.7ms
Benchmark: [0] random >>> random
> 0
Traceback (most recent call last):
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py", line 179, in <module>
main(sys.argv)
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py", line 175, in main
run_manual_env(env)
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py", line 60, in run_manual_env
eager_observation = env.reset(benchmark=benchmark)
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/envs/llvm/llvm_env.py", line 162, in reset
return super().reset(*args, **kwargs)
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/envs/compiler_env.py", line 466, in reset
reply = self.service(
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/service/connection.py", line 547, in __call__
return self.connection(
File "/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/service/connection.py", line 143, in __call__
raise ValueError(e.details()) from None
ValueError: Unknown benchmark "npb-v0/50"
```
## Expected behavior
Something like:
```
> python -m compiler_gym.bin.manual_env --env=llvm-autophase-ic-v0 --benchmark=npb-v0/50
Initialized environment in 264.7ms
No benchmarks available see https://facebookresearch.github.io/CompilerGym/getting_started.html#installing-benchmarks
Exiting...
```
## Environment
Please fill in this checklist:
- CompilerGym:
- How you installed CompilerGym (conda, pip, source): source
- OS: MacOs 10.15.7
- Python version: 3.8.4
- Build command you used (if compiling from source): make install
- GCC/clang version (if compiling from source): Apple clang version 12.0.0 (clang-1200.0.31.1)
- Bazel version (if compiling from source): 3.1.0- (@non-git)
- Versions of any other relevant libraries:
You may use the PyTorch
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/bin/manual_env.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 """Run a CompilerGym environment with text interface controls.
6
7 .. code-block::
8
9 $ python -m compiler_gym.bin.manual_env --env=<env> [--benchmark=<name>] [--observation=<space>] [--reward=<space>]
10
11 The benchmark to use can be specified using :code:`--benchmark=<name>`. If not
12 provided, you be presented with a list of benchmarks to choose from on launch.
13 Select :code:`random` to select a benchmark randomly.
14 """
15 import sys
16 from typing import Optional
17
18 from absl import app, flags
19
20 import compiler_gym.util.flags.ls_benchmark # Flag definition.
21 from compiler_gym.envs import CompilerEnv
22 from compiler_gym.util import user_input
23 from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
24 from compiler_gym.util.flags.env_from_flags import env_from_flags
25 from compiler_gym.util.shell_format import emph
26 from compiler_gym.util.timer import Timer
27
28 FLAGS = flags.FLAGS
29
30
31 def run_manual_env(env: CompilerEnv):
32 """Run an environment manually.
33
34 The manual environment allows the user to step through the environment,
35 selection observations, rewards, and actions to run as they see fit. This is
36 useful for debugging.
37
38 :param env: The environment to run.
39 """
40 benchmark = None
41 if not env.benchmark:
42 # Allow the user to choose a benchmark, with the first choice being
43 # to select randomly.
44 benchmarks = sorted(env.benchmarks)
45 # Strip default benchmark:// protocol.
46 for i, benchmark in enumerate(benchmarks):
47 if benchmark.startswith("benchmark://"):
48 benchmarks[i] = benchmark[len("benchmark://") :]
49
50 benchmark_index = user_input.read_list_index(
51 "Benchmark", ["random"] + benchmarks
52 )
53 if benchmark_index:
54 benchmark = benchmarks[benchmark_index - 1]
55 else:
56 benchmark = None
57
58 with Timer() as timer:
59 eager_observation = env.reset(benchmark=benchmark)
60
61 print(f"Reset {env.benchmark} environment in {timer}")
62 if env.observation_space and eager_observation is not None:
63 print(f"Observation: {env.observation_space.to_string(eager_observation)}")
64
65 observation_names = sorted(env.observation.spaces.keys())
66 reward_names = sorted(env.reward.spaces.keys())
67 last_eager_reward: Optional[float] = None
68 step_count = 1
69
70 while True:
71 print(
72 f"\nStep {step_count}. Select: [{emph('a')}]ction "
73 f"[{emph('o')}]bservation [{emph('r')}]eward "
74 f"[{emph('c')}]ommandline [{emph('e')}]nd >>> ",
75 end="",
76 flush=True,
77 )
78 while True:
79 c = user_input.read_char()
80 if c == "a":
81 print("action", flush=True)
82 index = user_input.read_list_index(
83 "Actions", ["random"] + env.action_space.names
84 )
85 step_count += 1
86 with Timer() as t:
87 if index == 0:
88 # User selected "random" action.
89 index = env.action_space.sample()
90 else:
91 # Offset to remove "random" action from index.
92 index -= 1
93 eager_observation, eager_reward, done, info = env.step(index)
94
95 # Print the eager observation, if available.
96 if env.observation_space and eager_observation is not None:
97 print(
98 f"Observation: {env.observation_space.to_string(eager_observation)}"
99 )
100
101 # Print the eager reward and the diff, if available.
102 if env.reward_space and eager_reward is not None:
103 reward_diff = ""
104 if last_eager_reward is not None and eager_reward is not None:
105 reward_diff = (
106 f" (change: {eager_reward - last_eager_reward:.6f})"
107 )
108 print(f"Reward: {eager_reward:.6f}{reward_diff}")
109 last_eager_reward = eager_reward
110
111 print(
112 f"Action {env.action_space.names[index]} in {t}.",
113 " No effect." if info.get("action_had_no_effect") else "",
114 flush=True,
115 )
116 if done:
117 print("Episode ended by environment: ", info["error_details"])
118 env.close()
119 return
120 break
121 if c == "o":
122 print("observation", flush=True)
123 observation_name = user_input.read_list_value(
124 "Observable values", observation_names
125 )
126 with Timer() as timer:
127 value = env.observation[observation_name]
128 print(env.observation.spaces[observation_name].to_string(value))
129 print(f"Observation {observation_name} in {timer}")
130 break
131 elif c == "r":
132 print("reward", flush=True)
133 reward_name = user_input.read_list_value("Rewards", reward_names)
134 with Timer(f"Reward {reward_name}"):
135 print(f"{env.reward[reward_name]:.6f}")
136 break
137 elif c == "c":
138 print("commandline")
139 print("$", env.commandline(), flush=True)
140 break
141 elif c == "e":
142 print("end", flush=True)
143 with Timer("Closed environment"):
144 env.close()
145 print("Have a nice day!")
146 return
147
148
149 def main(argv):
150 """Main entry point."""
151 argv = FLAGS(argv)
152 if len(argv) != 1:
153 raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")
154
155 if FLAGS.ls_benchmark:
156 benchmark = benchmark_from_flags()
157 env = env_from_flags(benchmark)
158 print("\n".join(sorted(env.benchmarks)))
159 env.close()
160 return
161
162 with Timer("Initialized environment"):
163 benchmark = benchmark_from_flags()
164 env = env_from_flags(benchmark)
165
166 run_manual_env(env)
167
168
169 if __name__ == "__main__":
170 main(sys.argv)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/compiler_gym/bin/manual_env.py b/compiler_gym/bin/manual_env.py
--- a/compiler_gym/bin/manual_env.py
+++ b/compiler_gym/bin/manual_env.py
@@ -43,10 +43,16 @@
# Allow the user to choose a benchmark, with the first choice being
# to select randomly.
benchmarks = sorted(env.benchmarks)
+ if not benchmarks:
+ print("No benchmarks available see https://facebookresearch.github.io/CompilerGym/getting_started.html#installing-benchmarks")
+ print("Exiting...")
+ env.close()
+ return
+
# Strip default benchmark:// protocol.
for i, benchmark in enumerate(benchmarks):
if benchmark.startswith("benchmark://"):
- benchmarks[i] = benchmark[len("benchmark://") :]
+ benchmarks[i] = benchmark[len("benchmark://"):]
benchmark_index = user_input.read_list_index(
"Benchmark", ["random"] + benchmarks
|
{"golden_diff": "diff --git a/compiler_gym/bin/manual_env.py b/compiler_gym/bin/manual_env.py\n--- a/compiler_gym/bin/manual_env.py\n+++ b/compiler_gym/bin/manual_env.py\n@@ -43,10 +43,16 @@\n # Allow the user to choose a benchmark, with the first choice being\n # to select randomly.\n benchmarks = sorted(env.benchmarks)\n+ if not benchmarks:\n+ print(\"No benchmarks available see https://facebookresearch.github.io/CompilerGym/getting_started.html#installing-benchmarks\")\n+ print(\"Exiting...\")\n+ env.close()\n+ return\n+\n # Strip default benchmark:// protocol.\n for i, benchmark in enumerate(benchmarks):\n if benchmark.startswith(\"benchmark://\"):\n- benchmarks[i] = benchmark[len(\"benchmark://\") :]\n+ benchmarks[i] = benchmark[len(\"benchmark://\"):]\n \n benchmark_index = user_input.read_list_index(\n \"Benchmark\", [\"random\"] + benchmarks\n", "issue": "Manual env fails without helpful error when no benchmarks are loaded.\n## \ud83d\udc1b Bug\r\n\r\ncompiler_gym.bin.manual_env fails with stacktrace if no benchmarks are loaded.\r\n\r\n## To Reproduce\r\n```\r\n> python -m compiler_gym.bin.manual_env --env=llvm-autophase-ic-v0 --benchmark=npb-v0/50\r\nInitialized environment in 264.7ms\r\nBenchmark: [0] random >>> random\r\n> 0\r\nTraceback (most recent call last):\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py\", line 179, in <module>\r\n main(sys.argv)\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py\", line 175, in main\r\n run_manual_env(env)\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/bin/manual_env.py\", line 60, in run_manual_env\r\n eager_observation = env.reset(benchmark=benchmark)\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/envs/llvm/llvm_env.py\", line 162, in reset\r\n return super().reset(*args, **kwargs)\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/envs/compiler_env.py\", line 466, in reset\r\n reply = self.service(\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/service/connection.py\", line 547, in __call__\r\n return self.connection(\r\n File \"/Users/hleather/opt/miniconda3/envs/compiler_gym/lib/python3.8/site-packages/compiler_gym-0.1.1-py3.8.egg/compiler_gym/service/connection.py\", line 143, in __call__\r\n raise ValueError(e.details()) from None\r\nValueError: Unknown benchmark \"npb-v0/50\"\r\n```\r\n## Expected behavior\r\nSomething like:\r\n```\r\n> python -m compiler_gym.bin.manual_env --env=llvm-autophase-ic-v0 --benchmark=npb-v0/50\r\nInitialized environment in 264.7ms\r\nNo benchmarks available see https://facebookresearch.github.io/CompilerGym/getting_started.html#installing-benchmarks\r\nExiting...\r\n```\r\n\r\n## Environment\r\n\r\nPlease fill in this checklist:\r\n\r\n- CompilerGym:\r\n- How you installed CompilerGym 
(conda, pip, source): source\r\n- OS: MacOs 10.15.7\r\n- Python version: 3.8.4\r\n- Build command you used (if compiling from source): make install\r\n- GCC/clang version (if compiling from source): Apple clang version 12.0.0 (clang-1200.0.31.1)\r\n- Bazel version (if compiling from source): 3.1.0- (@non-git)\r\n- Versions of any other relevant libraries:\r\n\r\nYou may use the PyTorch\r\nN/A\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Run a CompilerGym environment with text interface controls.\n\n.. code-block::\n\n $ python -m compiler_gym.bin.manual_env --env=<env> [--benchmark=<name>] [--observation=<space>] [--reward=<space>]\n\nThe benchmark to use can be specified using :code:`--benchmark=<name>`. If not\nprovided, you be presented with a list of benchmarks to choose from on launch.\nSelect :code:`random` to select a benchmark randomly.\n\"\"\"\nimport sys\nfrom typing import Optional\n\nfrom absl import app, flags\n\nimport compiler_gym.util.flags.ls_benchmark # Flag definition.\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util import user_input\nfrom compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags\nfrom compiler_gym.util.flags.env_from_flags import env_from_flags\nfrom compiler_gym.util.shell_format import emph\nfrom compiler_gym.util.timer import Timer\n\nFLAGS = flags.FLAGS\n\n\ndef run_manual_env(env: CompilerEnv):\n \"\"\"Run an environment manually.\n\n The manual environment allows the user to step through the environment,\n selection observations, rewards, and actions to run as they see fit. This is\n useful for debugging.\n\n :param env: The environment to run.\n \"\"\"\n benchmark = None\n if not env.benchmark:\n # Allow the user to choose a benchmark, with the first choice being\n # to select randomly.\n benchmarks = sorted(env.benchmarks)\n # Strip default benchmark:// protocol.\n for i, benchmark in enumerate(benchmarks):\n if benchmark.startswith(\"benchmark://\"):\n benchmarks[i] = benchmark[len(\"benchmark://\") :]\n\n benchmark_index = user_input.read_list_index(\n \"Benchmark\", [\"random\"] + benchmarks\n )\n if benchmark_index:\n benchmark = benchmarks[benchmark_index - 1]\n else:\n benchmark = None\n\n with Timer() as timer:\n eager_observation = env.reset(benchmark=benchmark)\n\n print(f\"Reset {env.benchmark} environment in {timer}\")\n if env.observation_space and eager_observation is not None:\n print(f\"Observation: {env.observation_space.to_string(eager_observation)}\")\n\n observation_names = sorted(env.observation.spaces.keys())\n reward_names = sorted(env.reward.spaces.keys())\n last_eager_reward: Optional[float] = None\n step_count = 1\n\n while True:\n print(\n f\"\\nStep {step_count}. 
Select: [{emph('a')}]ction \"\n f\"[{emph('o')}]bservation [{emph('r')}]eward \"\n f\"[{emph('c')}]ommandline [{emph('e')}]nd >>> \",\n end=\"\",\n flush=True,\n )\n while True:\n c = user_input.read_char()\n if c == \"a\":\n print(\"action\", flush=True)\n index = user_input.read_list_index(\n \"Actions\", [\"random\"] + env.action_space.names\n )\n step_count += 1\n with Timer() as t:\n if index == 0:\n # User selected \"random\" action.\n index = env.action_space.sample()\n else:\n # Offset to remove \"random\" action from index.\n index -= 1\n eager_observation, eager_reward, done, info = env.step(index)\n\n # Print the eager observation, if available.\n if env.observation_space and eager_observation is not None:\n print(\n f\"Observation: {env.observation_space.to_string(eager_observation)}\"\n )\n\n # Print the eager reward and the diff, if available.\n if env.reward_space and eager_reward is not None:\n reward_diff = \"\"\n if last_eager_reward is not None and eager_reward is not None:\n reward_diff = (\n f\" (change: {eager_reward - last_eager_reward:.6f})\"\n )\n print(f\"Reward: {eager_reward:.6f}{reward_diff}\")\n last_eager_reward = eager_reward\n\n print(\n f\"Action {env.action_space.names[index]} in {t}.\",\n \" No effect.\" if info.get(\"action_had_no_effect\") else \"\",\n flush=True,\n )\n if done:\n print(\"Episode ended by environment: \", info[\"error_details\"])\n env.close()\n return\n break\n if c == \"o\":\n print(\"observation\", flush=True)\n observation_name = user_input.read_list_value(\n \"Observable values\", observation_names\n )\n with Timer() as timer:\n value = env.observation[observation_name]\n print(env.observation.spaces[observation_name].to_string(value))\n print(f\"Observation {observation_name} in {timer}\")\n break\n elif c == \"r\":\n print(\"reward\", flush=True)\n reward_name = user_input.read_list_value(\"Rewards\", reward_names)\n with Timer(f\"Reward {reward_name}\"):\n print(f\"{env.reward[reward_name]:.6f}\")\n break\n elif c == \"c\":\n print(\"commandline\")\n print(\"$\", env.commandline(), flush=True)\n break\n elif c == \"e\":\n print(\"end\", flush=True)\n with Timer(\"Closed environment\"):\n env.close()\n print(\"Have a nice day!\")\n return\n\n\ndef main(argv):\n \"\"\"Main entry point.\"\"\"\n argv = FLAGS(argv)\n if len(argv) != 1:\n raise app.UsageError(f\"Unknown command line arguments: {argv[1:]}\")\n\n if FLAGS.ls_benchmark:\n benchmark = benchmark_from_flags()\n env = env_from_flags(benchmark)\n print(\"\\n\".join(sorted(env.benchmarks)))\n env.close()\n return\n\n with Timer(\"Initialized environment\"):\n benchmark = benchmark_from_flags()\n env = env_from_flags(benchmark)\n\n run_manual_env(env)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "compiler_gym/bin/manual_env.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Run a CompilerGym environment with text interface controls.\n\n.. code-block::\n\n $ python -m compiler_gym.bin.manual_env --env=<env> [--benchmark=<name>] [--observation=<space>] [--reward=<space>]\n\nThe benchmark to use can be specified using :code:`--benchmark=<name>`. 
If not\nprovided, you be presented with a list of benchmarks to choose from on launch.\nSelect :code:`random` to select a benchmark randomly.\n\"\"\"\nimport sys\nfrom typing import Optional\n\nfrom absl import app, flags\n\nimport compiler_gym.util.flags.ls_benchmark # Flag definition.\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.service import observation2str\nfrom compiler_gym.util import user_input\nfrom compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags\nfrom compiler_gym.util.flags.env_from_flags import env_from_flags\nfrom compiler_gym.util.shell_format import emph\nfrom compiler_gym.util.timer import Timer\n\nFLAGS = flags.FLAGS\n\n\ndef run_manual_env(env: CompilerEnv):\n \"\"\"Run an environment manually.\n\n The manual environment allows the user to step through the environment,\n selection observations, rewards, and actions to run as they see fit. This is\n useful for debugging.\n\n :param env: The environment to run.\n \"\"\"\n benchmark = None\n if not env.benchmark:\n # Allow the user to choose a benchmark, with the first choice being\n # to select randomly.\n benchmarks = sorted(env.benchmarks)\n if not benchmarks:\n print(\"No benchmarks available see https://facebookresearch.github.io/CompilerGym/getting_started.html#installing-benchmarks\")\n print(\"Exiting...\")\n env.close()\n return\n\n # Strip default benchmark:// protocol.\n for i, benchmark in enumerate(benchmarks):\n if benchmark.startswith(\"benchmark://\"):\n benchmarks[i] = benchmark[len(\"benchmark://\"):]\n\n benchmark_index = user_input.read_list_index(\n \"Benchmark\", [\"random\"] + benchmarks\n )\n if benchmark_index:\n benchmark = benchmarks[benchmark_index - 1]\n else:\n benchmark = None\n\n with Timer() as timer:\n eager_observation = env.reset(benchmark=benchmark)\n\n print(f\"Reset {env.benchmark} environment in {timer}\")\n if env.observation_space and eager_observation is not None:\n print(\n f\"Observation: {observation2str(env.observation_space, eager_observation)}\"\n )\n\n observation_names = sorted(env.observation.spaces.keys())\n reward_names = sorted(env.reward.spaces.keys())\n last_eager_reward: Optional[float] = None\n step_count = 1\n\n while True:\n print(\n f\"\\nStep {step_count}. 
Select: [{emph('a')}]ction \"\n f\"[{emph('o')}]bservation [{emph('r')}]eward \"\n f\"[{emph('c')}]ommandline [{emph('e')}]nd >>> \",\n end=\"\",\n flush=True,\n )\n while True:\n c = user_input.read_char()\n if c == \"a\":\n print(\"action\", flush=True)\n index = user_input.read_list_index(\n \"Actions\", [\"random\"] + env.action_space.names\n )\n step_count += 1\n with Timer() as t:\n if index == 0:\n # User selected \"random\" action.\n index = env.action_space.sample()\n else:\n # Offset to remove \"random\" action from index.\n index -= 1\n eager_observation, eager_reward, done, info = env.step(index)\n\n # Print the eager observation, if available.\n if env.observation_space and eager_observation is not None:\n print(\n f\"Observation: {observation2str(env.observation_space, eager_observation)}\"\n )\n\n # Print the eager reward and the diff, if available.\n if env.reward_space and eager_reward is not None:\n reward_diff = \"\"\n if last_eager_reward is not None and eager_reward is not None:\n reward_diff = (\n f\" (change: {eager_reward - last_eager_reward:.6f})\"\n )\n print(f\"Reward: {eager_reward:.6f}{reward_diff}\")\n last_eager_reward = eager_reward\n\n print(\n f\"Action {env.action_space.names[index]} in {t}.\",\n \" No effect.\" if info.get(\"action_had_no_effect\") else \"\",\n flush=True,\n )\n if done:\n print(\"Episode ended by environment: \", info[\"error_details\"])\n env.close()\n return\n break\n if c == \"o\":\n print(\"observation\", flush=True)\n observation_name = user_input.read_list_value(\n \"Observable values\", observation_names\n )\n with Timer() as timer:\n value = env.observation[observation_name]\n print(\n observation2str(env.observation.spaces[observation_name].id, value)\n )\n print(f\"Observation {observation_name} in {timer}\")\n break\n elif c == \"r\":\n print(\"reward\", flush=True)\n reward_name = user_input.read_list_value(\"Rewards\", reward_names)\n with Timer(f\"Reward {reward_name}\"):\n print(f\"{env.reward[reward_name]:.6f}\")\n break\n elif c == \"c\":\n print(\"commandline\")\n print(\"$\", env.commandline(), flush=True)\n break\n elif c == \"e\":\n print(\"end\", flush=True)\n with Timer(\"Closed environment\"):\n env.close()\n print(\"Have a nice day!\")\n return\n\n\ndef main(argv):\n \"\"\"Main entry point.\"\"\"\n argv = FLAGS(argv)\n if len(argv) != 1:\n raise app.UsageError(f\"Unknown command line arguments: {argv[1:]}\")\n\n if FLAGS.ls_benchmark:\n benchmark = benchmark_from_flags()\n env = env_from_flags(benchmark)\n print(\"\\n\".join(sorted(env.benchmarks)))\n env.close()\n return\n\n with Timer(\"Initialized environment\"):\n benchmark = benchmark_from_flags()\n env = env_from_flags(benchmark)\n\n run_manual_env(env)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "compiler_gym/bin/manual_env.py"}]}
| 2,889 | 210 |
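The patch in the record above adds an early exit when `env.benchmarks` comes back empty, so the interactive chooser never offers an empty menu and then fails later inside `env.reset()`. The sketch below is a minimal, self-contained illustration of that guard pattern; the function name, prompt strings and return convention are stand-ins for this write-up, not CompilerGym's own helpers, and the real fix additionally calls `env.close()` before returning.

```python
from typing import List, Optional


def choose_benchmark(benchmarks: List[str]) -> Optional[str]:
    """Return the chosen benchmark name, or None for "random" / nothing available."""
    benchmarks = sorted(benchmarks)
    if not benchmarks:
        # Guard added by the patch: bail out instead of presenting an empty list.
        print("No benchmarks available. Exiting...")
        return None
    # Strip the default benchmark:// protocol, mirroring the patched loop.
    benchmarks = [
        b[len("benchmark://"):] if b.startswith("benchmark://") else b
        for b in benchmarks
    ]
    for i, name in enumerate(benchmarks, start=1):
        print(f"[{i}] {name}")
    index = int(input("Benchmark (0 = random) >>> "))
    return None if index == 0 else benchmarks[index - 1]
```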
gh_patches_debug_13542
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-10032
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation spelling and formatting
There are various typos scattered throughout the documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py`
Content:
```
1 import numpy as np
2
3 from bokeh.layouts import column, row
4 from bokeh.models import CustomJS, Div, Slider
5 from bokeh.plotting import ColumnDataSource, figure, output_file, show
6
7 x = np.linspace(0, 10, 500)
8 y = np.sin(x)
9
10 source = ColumnDataSource(data=dict(x=x, y=y))
11
12 plot = figure(y_range=(-10, 10), plot_width=400, plot_height=200, background_fill_color="#fafafa", sizing_mode="scale_width")
13
14 plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
15
16 amp = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", sizing_mode="stretch_both")
17 freq = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", sizing_mode="stretch_both")
18 phase = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", sizing_mode="stretch_both")
19 offset = Slider(start=-5, end=5, value=0, step=.1, title="Offset", sizing_mode="stretch_both")
20
21 widgets = column(amp, freq, phase, offset, sizing_mode="fixed", height=250, width=150)
22
23 callback = CustomJS(args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset),
24 code="""
25 const data = source.data;
26 const A = amp.value;
27 const k = freq.value;
28 const phi = phase.value;
29 const B = offset.value;
30 const x = data['x']
31 const y = data['y']
32 for (var i = 0; i < x.length; i++) {
33 y[i] = B + A*Math.sin(k*x[i]+phi);
34 }
35 source.change.emit();
36 """)
37
38 amp.js_on_change('value', callback)
39 freq.js_on_change('value', callback)
40 phase.js_on_change('value', callback)
41 offset.js_on_change('value', callback)
42
43 heading = Div(sizing_mode="stretch_width", height=80, text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
44 "Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. "
45 "Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.")
46
47 layout = column(heading, row(widgets, plot), sizing_mode="stretch_both")
48
49 output_file("slider.html", title="slider.py example")
50
51 show(layout)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py
--- a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py
+++ b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py
@@ -40,9 +40,8 @@
phase.js_on_change('value', callback)
offset.js_on_change('value', callback)
-heading = Div(sizing_mode="stretch_width", height=80, text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
-"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. "
-"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.")
+heading = Div(sizing_mode="stretch_width", height=80, text="In this wave example, the sliders on the left "
+ "can be used to change the amplitude, frequency, phase and offset of the wave.")
layout = column(heading, row(widgets, plot), sizing_mode="stretch_both")
|
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n--- a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n+++ b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n@@ -40,9 +40,8 @@\n phase.js_on_change('value', callback)\n offset.js_on_change('value', callback)\n \n-heading = Div(sizing_mode=\"stretch_width\", height=80, text=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. \"\n-\"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. \"\n-\"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.\")\n+heading = Div(sizing_mode=\"stretch_width\", height=80, text=\"In this wave example, the sliders on the left \"\n+ \"can be used to change the amplitude, frequency, phase and offset of the wave.\")\n \n layout = column(heading, row(widgets, plot), sizing_mode=\"stretch_both\")\n", "issue": "Documentation spelling and formatting\nThere are various typos scattered throughout the documentation.\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Div, Slider\nfrom bokeh.plotting import ColumnDataSource, figure, output_file, show\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=200, background_fill_color=\"#fafafa\", sizing_mode=\"scale_width\")\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\namp = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", sizing_mode=\"stretch_both\")\nfreq = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", sizing_mode=\"stretch_both\")\nphase = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", sizing_mode=\"stretch_both\")\noffset = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", sizing_mode=\"stretch_both\")\n\nwidgets = column(amp, freq, phase, offset, sizing_mode=\"fixed\", height=250, width=150)\n\ncallback = CustomJS(args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset),\n code=\"\"\"\n const data = source.data;\n const A = amp.value;\n const k = freq.value;\n const phi = phase.value;\n const B = offset.value;\n const x = data['x']\n const y = data['y']\n for (var i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.change.emit();\n\"\"\")\n\namp.js_on_change('value', callback)\nfreq.js_on_change('value', callback)\nphase.js_on_change('value', callback)\noffset.js_on_change('value', callback)\n\nheading = Div(sizing_mode=\"stretch_width\", height=80, text=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. \"\n\"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. \"\n\"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. 
Mauris placerat.\")\n\nlayout = column(heading, row(widgets, plot), sizing_mode=\"stretch_both\")\n\noutput_file(\"slider.html\", title=\"slider.py example\")\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py"}], "after_files": [{"content": "import numpy as np\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Div, Slider\nfrom bokeh.plotting import ColumnDataSource, figure, output_file, show\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=200, background_fill_color=\"#fafafa\", sizing_mode=\"scale_width\")\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\namp = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", sizing_mode=\"stretch_both\")\nfreq = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", sizing_mode=\"stretch_both\")\nphase = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", sizing_mode=\"stretch_both\")\noffset = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", sizing_mode=\"stretch_both\")\n\nwidgets = column(amp, freq, phase, offset, sizing_mode=\"fixed\", height=250, width=150)\n\ncallback = CustomJS(args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset),\n code=\"\"\"\n const data = source.data;\n const A = amp.value;\n const k = freq.value;\n const phi = phase.value;\n const B = offset.value;\n const x = data['x']\n const y = data['y']\n for (var i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.change.emit();\n\"\"\")\n\namp.js_on_change('value', callback)\nfreq.js_on_change('value', callback)\nphase.js_on_change('value', callback)\noffset.js_on_change('value', callback)\n\nheading = Div(sizing_mode=\"stretch_width\", height=80, text=\"In this wave example, the sliders on the left \"\n \"can be used to change the amplitude, frequency, phase and offset of the wave.\")\n\nlayout = column(heading, row(widgets, plot), sizing_mode=\"stretch_both\")\n\noutput_file(\"slider.html\", title=\"slider.py example\")\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py"}]}
| 973 | 259 |
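The fix in the record above only swaps a lorem-ipsum heading for text describing the controls; the layout pattern the docs example demonstrates — a fixed-size widget column beside a width-scaling plot, under a stretch-width heading — is independent of that text. A trimmed sketch of the pattern, reusing the Bokeh 2.x-era arguments from the example and leaving out the `CustomJS` callback:

```python
from bokeh.layouts import column, row
from bokeh.models import Div, Slider
from bokeh.plotting import figure, show

# The plot scales with the page width; the widget column keeps a fixed footprint.
plot = figure(plot_width=400, plot_height=200, sizing_mode="scale_width")
plot.line([0, 1, 2, 3], [0, 1, 0, 1], line_width=3)

amp = Slider(start=0.1, end=10, value=1, step=0.1, title="Amplitude",
             sizing_mode="stretch_both")
widgets = column(amp, sizing_mode="fixed", height=250, width=150)

heading = Div(sizing_mode="stretch_width", height=80,
              text="In this wave example, the sliders on the left can be used "
                   "to change the amplitude, frequency, phase and offset of the wave.")

show(column(heading, row(widgets, plot), sizing_mode="stretch_both"))
```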
gh_patches_debug_23031
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-4373
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] Minor discrepancy in comparing means example
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe your proposed suggestion in detail.
Using a movie watching dataset as an example but calling it "resting state" in several places
### List any pages that would be impacted.
The "comparing means of images with `math_img`" example: https://nilearn.github.io/stable/auto_examples/06_manipulating_images/plot_compare_mean_image.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/06_manipulating_images/plot_compare_mean_image.py`
Content:
```
1 """
2 Comparing the means of 2 images
3 ===============================
4
5 The goal of this example is to illustrate the use of the function
6 :func:`nilearn.image.math_img` with a list of images as input.
7 We compare the means of 2 resting state 4D images. The mean of the images
8 could have been computed with nilearn :func:`nilearn.image.mean_img` function.
9 """
10
11 # %%
12 # Fetching 2 subject movie watching brain development :term:`fMRI` datasets.
13 from nilearn import datasets
14
15 dataset = datasets.fetch_development_fmri(n_subjects=2)
16
17
18 # %%
19 # Print basic information on the adhd subjects resting state datasets.
20 print(f"Subject 1 resting state dataset at: {dataset.func[0]}")
21 print(f"Subject 2 resting state dataset at: {dataset.func[1]}")
22
23
24 # %%
25 # Comparing the means of the 2 movie watching datasets.
26 from nilearn import image, plotting
27
28 result_img = image.math_img(
29 "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)",
30 img1=dataset.func[0],
31 img2=dataset.func[1],
32 )
33
34 plotting.plot_stat_map(
35 result_img, title="Comparing means of 2 resting state 4D images."
36 )
37 plotting.show()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/06_manipulating_images/plot_compare_mean_image.py b/examples/06_manipulating_images/plot_compare_mean_image.py
--- a/examples/06_manipulating_images/plot_compare_mean_image.py
+++ b/examples/06_manipulating_images/plot_compare_mean_image.py
@@ -4,7 +4,7 @@
The goal of this example is to illustrate the use of the function
:func:`nilearn.image.math_img` with a list of images as input.
-We compare the means of 2 resting state 4D images. The mean of the images
+We compare the means of 2 movie watching 4D images. The mean of the images
could have been computed with nilearn :func:`nilearn.image.mean_img` function.
"""
@@ -16,9 +16,9 @@
# %%
-# Print basic information on the adhd subjects resting state datasets.
-print(f"Subject 1 resting state dataset at: {dataset.func[0]}")
-print(f"Subject 2 resting state dataset at: {dataset.func[1]}")
+# Print file locations for both subjects.
+print(f"Subject 1 dataset at: {dataset.func[0]}")
+print(f"Subject 2 dataset at: {dataset.func[1]}")
# %%
@@ -32,6 +32,6 @@
)
plotting.plot_stat_map(
- result_img, title="Comparing means of 2 resting state 4D images."
+ result_img, title="Comparing means of 2 movie watching 4D images."
)
plotting.show()
|
{"golden_diff": "diff --git a/examples/06_manipulating_images/plot_compare_mean_image.py b/examples/06_manipulating_images/plot_compare_mean_image.py\n--- a/examples/06_manipulating_images/plot_compare_mean_image.py\n+++ b/examples/06_manipulating_images/plot_compare_mean_image.py\n@@ -4,7 +4,7 @@\n \n The goal of this example is to illustrate the use of the function\n :func:`nilearn.image.math_img` with a list of images as input.\n-We compare the means of 2 resting state 4D images. The mean of the images\n+We compare the means of 2 movie watching 4D images. The mean of the images\n could have been computed with nilearn :func:`nilearn.image.mean_img` function.\n \"\"\"\n \n@@ -16,9 +16,9 @@\n \n \n # %%\n-# Print basic information on the adhd subjects resting state datasets.\n-print(f\"Subject 1 resting state dataset at: {dataset.func[0]}\")\n-print(f\"Subject 2 resting state dataset at: {dataset.func[1]}\")\n+# Print file locations for both subjects.\n+print(f\"Subject 1 dataset at: {dataset.func[0]}\")\n+print(f\"Subject 2 dataset at: {dataset.func[1]}\")\n \n \n # %%\n@@ -32,6 +32,6 @@\n )\n \n plotting.plot_stat_map(\n- result_img, title=\"Comparing means of 2 resting state 4D images.\"\n+ result_img, title=\"Comparing means of 2 movie watching 4D images.\"\n )\n plotting.show()\n", "issue": "[DOC] Minor discrepancy in comparing means example\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe your proposed suggestion in detail.\n\nUsing a movie watching dataset as an example but calling it \"resting state\" in several places\n\n### List any pages that would be impacted.\n\nThe \"comparing means of images with `math_img`\" example: https://nilearn.github.io/stable/auto_examples/06_manipulating_images/plot_compare_mean_image.html\n", "before_files": [{"content": "\"\"\"\nComparing the means of 2 images\n===============================\n\nThe goal of this example is to illustrate the use of the function\n:func:`nilearn.image.math_img` with a list of images as input.\nWe compare the means of 2 resting state 4D images. The mean of the images\ncould have been computed with nilearn :func:`nilearn.image.mean_img` function.\n\"\"\"\n\n# %%\n# Fetching 2 subject movie watching brain development :term:`fMRI` datasets.\nfrom nilearn import datasets\n\ndataset = datasets.fetch_development_fmri(n_subjects=2)\n\n\n# %%\n# Print basic information on the adhd subjects resting state datasets.\nprint(f\"Subject 1 resting state dataset at: {dataset.func[0]}\")\nprint(f\"Subject 2 resting state dataset at: {dataset.func[1]}\")\n\n\n# %%\n# Comparing the means of the 2 movie watching datasets.\nfrom nilearn import image, plotting\n\nresult_img = image.math_img(\n \"np.mean(img1, axis=-1) - np.mean(img2, axis=-1)\",\n img1=dataset.func[0],\n img2=dataset.func[1],\n)\n\nplotting.plot_stat_map(\n result_img, title=\"Comparing means of 2 resting state 4D images.\"\n)\nplotting.show()\n", "path": "examples/06_manipulating_images/plot_compare_mean_image.py"}], "after_files": [{"content": "\"\"\"\nComparing the means of 2 images\n===============================\n\nThe goal of this example is to illustrate the use of the function\n:func:`nilearn.image.math_img` with a list of images as input.\nWe compare the means of 2 movie watching 4D images. 
The mean of the images\ncould have been computed with nilearn :func:`nilearn.image.mean_img` function.\n\"\"\"\n\n# %%\n# Fetching 2 subject movie watching brain development :term:`fMRI` datasets.\nfrom nilearn import datasets\n\ndataset = datasets.fetch_development_fmri(n_subjects=2)\n\n\n# %%\n# Print file locations for both subjects.\nprint(f\"Subject 1 dataset at: {dataset.func[0]}\")\nprint(f\"Subject 2 dataset at: {dataset.func[1]}\")\n\n\n# %%\n# Comparing the means of the 2 movie watching datasets.\nfrom nilearn import image, plotting\n\nresult_img = image.math_img(\n \"np.mean(img1, axis=-1) - np.mean(img2, axis=-1)\",\n img1=dataset.func[0],\n img2=dataset.func[1],\n)\n\nplotting.plot_stat_map(\n result_img, title=\"Comparing means of 2 movie watching 4D images.\"\n)\nplotting.show()\n", "path": "examples/06_manipulating_images/plot_compare_mean_image.py"}]}
| 727 | 348 |
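The example in the record above points out in its docstring that the same means could have been computed with `nilearn.image.mean_img`. A short sketch of that equivalence, assuming the same two movie-watching functional images returned by `fetch_development_fmri` (this downloads data on first run):

```python
from nilearn import datasets, image

dataset = datasets.fetch_development_fmri(n_subjects=2)

# One math_img call over both 4D images, as in the example ...
diff_a = image.math_img(
    "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)",
    img1=dataset.func[0],
    img2=dataset.func[1],
)

# ... or mean_img per image, then a subtraction.
mean_1 = image.mean_img(dataset.func[0])
mean_2 = image.mean_img(dataset.func[1])
diff_b = image.math_img("a - b", a=mean_1, b=mean_2)
```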
gh_patches_debug_5762
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4445
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not allowing attendee POST request
**I'm submitting a ...**
- [x] bug report
**Current behavior:**
Created a new event with new user account. Created a free ticket for the event. for create attendee request the response is:
```
"errors": [
{
"status": 403,
"source": {
"source": "event_id"
},
"detail": "Access Forbidden",
"title": "Access Forbidden"
}
],
"jsonapi": {
"version": "1.0"
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/attendees.py`
Content:
```
1 from flask_jwt import current_identity
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3
4 from app.api.bootstrap import api
5 from app.api.helpers.db import safe_query
6 from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity
7 from app.api.helpers.permission_manager import has_access
8 from app.api.helpers.permissions import jwt_required
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic
12 from app.models import db
13 from app.models.order import Order
14 from app.models.ticket import Ticket
15 from app.models.ticket_holder import TicketHolder
16 from app.models.user import User
17
18
19 class AttendeeListPost(ResourceList):
20 """
21 List and create Attendees through direct URL
22 """
23
24 def before_post(self, args, kwargs, data):
25 """
26 Before post method to check for required relationship and proper permissions
27 :param args:
28 :param kwargs:
29 :param data:
30 :return:
31 """
32 require_relationship(['ticket', 'event'], data)
33 if not has_access('is_coorganizer', event_id=data['event']):
34 raise ForbiddenException({'source': 'event_id'}, "Access Forbidden")
35
36 methods = ['POST']
37 schema = AttendeeSchema
38 data_layer = {'session': db.session,
39 'model': TicketHolder}
40
41
42 class AttendeeList(ResourceList):
43 """
44 List Attendees
45 """
46 def query(self, view_kwargs):
47 """
48 query method for Attendees List
49 :param view_kwargs:
50 :return:
51 """
52 query_ = self.session.query(TicketHolder)
53
54 if view_kwargs.get('order_identifier'):
55 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
56 if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',
57 id=order.user_id):
58 raise ForbiddenException({'source': ''}, 'Access Forbidden')
59 query_ = query_.join(Order).filter(Order.id == order.id)
60
61 if view_kwargs.get('ticket_id'):
62 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
63 if not has_access('is_registrar', event_id=ticket.event_id):
64 raise ForbiddenException({'source': ''}, 'Access Forbidden')
65 query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)
66
67 if view_kwargs.get('user_id'):
68 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
69 if not has_access('is_user_itself', id=user.id):
70 raise ForbiddenException({'source': ''}, 'Access Forbidden')
71 query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)
72
73 query_ = event_query(self, query_, view_kwargs, permission='is_registrar')
74 return query_
75
76 view_kwargs = True
77 methods = ['GET', ]
78 schema = AttendeeSchema
79 data_layer = {'session': db.session,
80 'model': TicketHolder,
81 'methods': {
82 'query': query
83 }}
84
85
86 class AttendeeDetail(ResourceDetail):
87 """
88 Attendee detail by id
89 """
90 def before_get_object(self, view_kwargs):
91 """
92 before get object method for attendee detail
93 :param view_kwargs:
94 :return:
95 """
96 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')
97 if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):
98 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
99
100 def before_delete_object(self, obj, kwargs):
101 """
102 before delete object method for attendee detail
103 :param obj:
104 :param kwargs:
105 :return:
106 """
107 if not has_access('is_registrar', event_id=obj.event_id):
108 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
109
110 def before_update_object(self, obj, data, kwargs):
111 """
112 before update object method for attendee detail
113 :param obj:
114 :param data:
115 :param kwargs:
116 :return:
117 """
118 if not has_access('is_registrar', event_id=obj.event_id):
119 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
120
121 if 'is_checked_in' in data:
122 if data['is_checked_in'] and 'checkin_times' not in data:
123 raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},
124 "Check in time missing while trying to check in attendee")
125
126 if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(","):
127 data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])
128
129 decorators = (jwt_required,)
130 schema = AttendeeSchema
131 data_layer = {'session': db.session,
132 'model': TicketHolder,
133 'methods': {
134 'before_get_object': before_get_object,
135 'before_update_object': before_update_object,
136 'before_delete_object': before_delete_object
137 }}
138
139
140 class AttendeeRelationshipRequired(ResourceRelationship):
141 """
142 Attendee Relationship (Required)
143 """
144 decorators = (jwt_required,)
145 methods = ['GET', 'PATCH']
146 schema = AttendeeSchema
147 data_layer = {'session': db.session,
148 'model': TicketHolder}
149
150
151 class AttendeeRelationshipOptional(ResourceRelationship):
152 """
153 Attendee Relationship(Optional)
154 """
155 decorators = (api.has_permission('is_user_itself', fetch="user_id", fetch_as="id", model=TicketHolder),)
156 schema = AttendeeSchema
157 data_layer = {'session': db.session,
158 'model': TicketHolder}
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/attendees.py b/app/api/attendees.py
--- a/app/api/attendees.py
+++ b/app/api/attendees.py
@@ -30,9 +30,8 @@
:return:
"""
require_relationship(['ticket', 'event'], data)
- if not has_access('is_coorganizer', event_id=data['event']):
- raise ForbiddenException({'source': 'event_id'}, "Access Forbidden")
+ decorators = (jwt_required,)
methods = ['POST']
schema = AttendeeSchema
data_layer = {'session': db.session,
|
{"golden_diff": "diff --git a/app/api/attendees.py b/app/api/attendees.py\n--- a/app/api/attendees.py\n+++ b/app/api/attendees.py\n@@ -30,9 +30,8 @@\n :return:\n \"\"\"\n require_relationship(['ticket', 'event'], data)\n- if not has_access('is_coorganizer', event_id=data['event']):\n- raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n \n+ decorators = (jwt_required,)\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n", "issue": "Not allowing attendee POST request\n**I'm submitting a ...**\r\n- [x] bug report\r\n\r\n**Current behavior:**\r\nCreated a new event with new user account. Created a free ticket for the event. for create attendee request the response is:\r\n```\r\n\"errors\": [\r\n {\r\n \"status\": 403,\r\n \"source\": {\r\n \"source\": \"event_id\"\r\n },\r\n \"detail\": \"Access Forbidden\",\r\n \"title\": \"Access Forbidden\"\r\n }\r\n ],\r\n \"jsonapi\": {\r\n \"version\": \"1.0\"\r\n }\r\n}\r\n```\n", "before_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n Before post method to check for required relationship and proper permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['ticket', 'event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': 'event_id'}, \"Access Forbidden\")\n\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for Attendees List\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = 
query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method for attendee detail\n :param view_kwargs:\n :return:\n \"\"\"\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n before delete object method for attendee detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n \"\"\"\n before update object method for attendee detail\n :param obj:\n :param data:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n if 'is_checked_in' in data:\n if data['is_checked_in'] and 'checkin_times' not in data:\n raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},\n \"Check in time missing while trying to check in attendee\")\n\n if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(\",\"):\n data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}], "after_files": [{"content": "from flask_jwt import current_identity\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema, AttendeeSchemaPublic\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import 
TicketHolder\nfrom app.models.user import User\n\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n Before post method to check for required relationship and proper permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['ticket', 'event'], data)\n\n decorators = (jwt_required,)\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for Attendees List\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) or not has_access('is_user_itself',\n id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method for attendee detail\n :param view_kwargs:\n :return:\n \"\"\"\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n before delete object method for attendee detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n \"\"\"\n before update object method for attendee detail\n :param obj:\n :param data:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n if 'is_checked_in' in data:\n if data['is_checked_in'] and 'checkin_times' not in data:\n raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},\n \"Check in time missing while trying to check in attendee\")\n\n if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(\",\"):\n data['checkin_times'] = 
'{},{}'.format(obj.checkin_times, data['checkin_times'])\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n", "path": "app/api/attendees.py"}]}
| 2,017 | 131 |
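The patch in the record above removes the co-organizer requirement from `AttendeeListPost.before_post` and leans on `jwt_required` plus the ticket/event relationship check instead, which is why an ordinary buyer's POST no longer comes back 403. A minimal before/after sketch of that logic, written as plain functions rather than the project's flask-rest-jsonapi resource classes:

```python
def require_relationship(required, data):
    missing = [rel for rel in required if rel not in data]
    if missing:
        raise ValueError(f"missing relationships: {missing}")


def before_post_old(data, has_access):
    require_relationship(['ticket', 'event'], data)
    # Old behaviour: only event co-organizers may create attendees,
    # so a regular ticket buyer received the 403 shown in the issue.
    if not has_access('is_coorganizer', event_id=data['event']):
        raise PermissionError("Access Forbidden")


def before_post_new(data, has_access):
    # New behaviour: authentication (jwt_required on the resource) plus the
    # relationship check is enough; no event-level permission is demanded.
    # has_access is unused here and kept only to mirror the old signature.
    require_relationship(['ticket', 'event'], data)
```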
gh_patches_debug_10165
|
rasdani/github-patches
|
git_diff
|
pypa__pip-9050
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In virtual environment populated with `flit install -s` (editable), `python3 -m pip freeze` raises `AssertionError` on MacOS
**Environment**
* pip version: 20.2.3
* Python version: 3.8
* OS: MacOS 10.15.7 and Ubuntu 20.04 (tested on two different machines)
I created the virtual environments with `python3 -m venv ...` and populated them with `flit install -s` (which installs a local script as "editable" analogously to `python3 -m pip install --editable .`.
**Description**
When I run `python3 -m pip freeze' within the virtual environment, the following exception is raised:
```
ERROR: Exception:
Traceback (most recent call last):
File "/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/cli/base_command.py", line 228, in _main
status = self.run(options, args)
File "/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/commands/freeze.py", line 101, in run
for line in freeze(**freeze_kwargs):
File "/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py", line 67, in freeze
req = FrozenRequirement.from_dist(dist)
File "/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py", line 257, in from_dist
req = direct_url_as_pep440_direct_reference(
File "/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py", line 49, in direct_url_as_pep440_direct_reference
assert not direct_url.info.editable
AssertionError
```
@sbidoul @chrahunt I note that this assertion error was introduced a few months ago (see #7612).
**Expected behavior**
I expected pip to display a list of installed modules, as it does when I run pip 20.2.3 within virtual environments created prior to mid-August, e.g.:
```
alabaster==0.7.12
appdirs==1.4.4
astroid==2.4.2
attrs==19.3.0
Babel==2.8.0
black==19.10b0
```
**How to Reproduce**
1. Go to a Github repo with a Python script under development - or clone the small project [mklists](https://github.com/tombaker/mklists) that I used for this test (and note its [pyproject.toml](https://github.com/tombaker/mklists/blob/master/pyproject.toml).
2. Create a virtual environment, e.g.: `python3 -m venv .venv`
3. Activate the virtual environment, e.g.: `source .venv/bin/activate` (MacOS) or `. .venv/bin/activate` (Ubuntu)
4. Install `flit`: `python3 -m pip install --upgrade flit`
5. Run `flit install -s` to install the local code as "editable".
6. Run `python3 -m pip freeze`.
7. Instead of displaying a list of installed modules, an exception is raised (as described above).
**Output (MacOS)**
```
577 [master] ~/github/tombaker/mklists> python3 -m venv .venv
578 [master] ~/github/tombaker/mklists> source .venv/bin/activate
(.venv) 579 [master] ~/github/tombaker/mklists> python3 -m pip install --upgrade flit
Collecting flit
Using cached flit-3.0.0-py3-none-any.whl (48 kB)
... etc ...
Successfully installed certifi-2020.6.20 chardet-3.0.4 docutils-0.16 flit-3.0.0 flit-core-3.0.0 idna-2.10 pytoml-0.1.21 requests-2.24.0 urllib3-1.25.10
WARNING: You are using pip version 20.2.1; however, version 20.2.3 is available.
You should consider upgrading via the '/Users/tbaker/github/tombaker/mklists/.venv/bin/python3 -m pip install --upgrade pip' command.
(.venv) 580 [master] ~/github/tombaker/mklists> python3 -m pip install --upgrade pip
...
Successfully installed pip-20.2.3
(.venv) 581 [master] ~/github/tombaker/mklists> flit install -s
Extras to install for deps 'all': {'test', 'doc', '.none'} I-flit.install
Installing requirements I-flit.install
Collecting pytest
Using cached pytest-6.1.1-py3-none-any.whl (272 kB)
... etc ...
Successfully installed Jinja2-2.11.2 MarkupSafe-1.1.1 ... etc ...
Symlinking mklists -> /Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/mklists I-flit.install
Writing script to /Users/tbaker/github/tombaker/mklists/.venv/bin/mklists I-flit.install
(.venv) 582 [master] ~/github/tombaker/mklists> python3 -m pip freeze
ERROR: Exception:
Traceback (most recent call last):
File "/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/cli/base_command.py", line 228, in _main
status = self.run(options, args)
File "/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/commands/freeze.py", line 101, in run
for line in freeze(**freeze_kwargs):
File "/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py", line 67, in freeze
req = FrozenRequirement.from_dist(dist)
File "/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py", line 257, in from_dist
req = direct_url_as_pep440_direct_reference(
File "/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py", line 49, in direct_url_as_pep440_direct_reference
assert not direct_url.info.editable
AssertionError
```
**Note**
On 13 October, I described this issue on [Stackoverflow](https://stackoverflow.com/questions/64339735/python-virtual-environments-created-since-mid-august-raise-assertionerror-for)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/utils/direct_url_helpers.py`
Content:
```
1 import logging
2
3 from pip._internal.models.direct_url import (
4 DIRECT_URL_METADATA_NAME,
5 ArchiveInfo,
6 DirectUrl,
7 DirectUrlValidationError,
8 DirInfo,
9 VcsInfo,
10 )
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12 from pip._internal.vcs import vcs
13
14 try:
15 from json import JSONDecodeError
16 except ImportError:
17 # PY2
18 JSONDecodeError = ValueError # type: ignore
19
20 if MYPY_CHECK_RUNNING:
21 from typing import Optional
22
23 from pip._vendor.pkg_resources import Distribution
24
25 from pip._internal.models.link import Link
26
27 logger = logging.getLogger(__name__)
28
29
30 def direct_url_as_pep440_direct_reference(direct_url, name):
31 # type: (DirectUrl, str) -> str
32 """Convert a DirectUrl to a pip requirement string."""
33 direct_url.validate() # if invalid, this is a pip bug
34 requirement = name + " @ "
35 fragments = []
36 if isinstance(direct_url.info, VcsInfo):
37 requirement += "{}+{}@{}".format(
38 direct_url.info.vcs, direct_url.url, direct_url.info.commit_id
39 )
40 elif isinstance(direct_url.info, ArchiveInfo):
41 requirement += direct_url.url
42 if direct_url.info.hash:
43 fragments.append(direct_url.info.hash)
44 else:
45 assert isinstance(direct_url.info, DirInfo)
46 # pip should never reach this point for editables, since
47 # pip freeze inspects the editable project location to produce
48 # the requirement string
49 assert not direct_url.info.editable
50 requirement += direct_url.url
51 if direct_url.subdirectory:
52 fragments.append("subdirectory=" + direct_url.subdirectory)
53 if fragments:
54 requirement += "#" + "&".join(fragments)
55 return requirement
56
57
58 def direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False):
59 # type: (Link, Optional[str], bool) -> DirectUrl
60 if link.is_vcs:
61 vcs_backend = vcs.get_backend_for_scheme(link.scheme)
62 assert vcs_backend
63 url, requested_revision, _ = (
64 vcs_backend.get_url_rev_and_auth(link.url_without_fragment)
65 )
66 # For VCS links, we need to find out and add commit_id.
67 if link_is_in_wheel_cache:
68 # If the requested VCS link corresponds to a cached
69 # wheel, it means the requested revision was an
70 # immutable commit hash, otherwise it would not have
71 # been cached. In that case we don't have a source_dir
72 # with the VCS checkout.
73 assert requested_revision
74 commit_id = requested_revision
75 else:
76 # If the wheel was not in cache, it means we have
77 # had to checkout from VCS to build and we have a source_dir
78 # which we can inspect to find out the commit id.
79 assert source_dir
80 commit_id = vcs_backend.get_revision(source_dir)
81 return DirectUrl(
82 url=url,
83 info=VcsInfo(
84 vcs=vcs_backend.name,
85 commit_id=commit_id,
86 requested_revision=requested_revision,
87 ),
88 subdirectory=link.subdirectory_fragment,
89 )
90 elif link.is_existing_dir():
91 return DirectUrl(
92 url=link.url_without_fragment,
93 info=DirInfo(),
94 subdirectory=link.subdirectory_fragment,
95 )
96 else:
97 hash = None
98 hash_name = link.hash_name
99 if hash_name:
100 hash = "{}={}".format(hash_name, link.hash)
101 return DirectUrl(
102 url=link.url_without_fragment,
103 info=ArchiveInfo(hash=hash),
104 subdirectory=link.subdirectory_fragment,
105 )
106
107
108 def dist_get_direct_url(dist):
109 # type: (Distribution) -> Optional[DirectUrl]
110 """Obtain a DirectUrl from a pkg_resource.Distribution.
111
112 Returns None if the distribution has no `direct_url.json` metadata,
113 or if `direct_url.json` is invalid.
114 """
115 if not dist.has_metadata(DIRECT_URL_METADATA_NAME):
116 return None
117 try:
118 return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))
119 except (
120 DirectUrlValidationError,
121 JSONDecodeError,
122 UnicodeDecodeError
123 ) as e:
124 logger.warning(
125 "Error parsing %s for %s: %s",
126 DIRECT_URL_METADATA_NAME,
127 dist.project_name,
128 e,
129 )
130 return None
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/utils/direct_url_helpers.py b/src/pip/_internal/utils/direct_url_helpers.py
--- a/src/pip/_internal/utils/direct_url_helpers.py
+++ b/src/pip/_internal/utils/direct_url_helpers.py
@@ -43,10 +43,6 @@
fragments.append(direct_url.info.hash)
else:
assert isinstance(direct_url.info, DirInfo)
- # pip should never reach this point for editables, since
- # pip freeze inspects the editable project location to produce
- # the requirement string
- assert not direct_url.info.editable
requirement += direct_url.url
if direct_url.subdirectory:
fragments.append("subdirectory=" + direct_url.subdirectory)
|
{"golden_diff": "diff --git a/src/pip/_internal/utils/direct_url_helpers.py b/src/pip/_internal/utils/direct_url_helpers.py\n--- a/src/pip/_internal/utils/direct_url_helpers.py\n+++ b/src/pip/_internal/utils/direct_url_helpers.py\n@@ -43,10 +43,6 @@\n fragments.append(direct_url.info.hash)\n else:\n assert isinstance(direct_url.info, DirInfo)\n- # pip should never reach this point for editables, since\n- # pip freeze inspects the editable project location to produce\n- # the requirement string\n- assert not direct_url.info.editable\n requirement += direct_url.url\n if direct_url.subdirectory:\n fragments.append(\"subdirectory=\" + direct_url.subdirectory)\n", "issue": "In virtual environment populated with `flit install -s` (editable), `python3 -m pip freeze` raises `AssertionError` on MacOS\n**Environment**\r\n\r\n* pip version: 20.2.3\r\n* Python version: 3.8\r\n* OS: MacOS 10.15.7 and Ubuntu 20.04 (tested on two different machines)\r\n\r\nI created the virtual environments with `python3 -m venv ...` and populated them with `flit install -s` (which installs a local script as \"editable\" analogously to `python3 -m pip install --editable .`.\r\n\r\n**Description**\r\n\r\nWhen I run `python3 -m pip freeze' within the virtual environment, the following exception is raised:\r\n```\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/cli/base_command.py\", line 228, in _main\r\n status = self.run(options, args)\r\n File \"/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/commands/freeze.py\", line 101, in run\r\n for line in freeze(**freeze_kwargs):\r\n File \"/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py\", line 67, in freeze\r\n req = FrozenRequirement.from_dist(dist)\r\n File \"/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py\", line 257, in from_dist\r\n req = direct_url_as_pep440_direct_reference(\r\n File \"/Users/tbaker/venvs/dcapenv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py\", line 49, in direct_url_as_pep440_direct_reference\r\n assert not direct_url.info.editable\r\nAssertionError\r\n```\r\n@sbidoul @chrahunt I note that this assertion error was introduced a few months ago (see #7612).\r\n\r\n**Expected behavior**\r\n\r\nI expected pip to display a list of installed modules, as it does when I run pip 20.2.3 within virtual environments created prior to mid-August, e.g.:\r\n```\r\nalabaster==0.7.12\r\nappdirs==1.4.4\r\nastroid==2.4.2\r\nattrs==19.3.0\r\nBabel==2.8.0\r\nblack==19.10b0\r\n```\r\n\r\n**How to Reproduce**\r\n\r\n1. Go to a Github repo with a Python script under development - or clone the small project [mklists](https://github.com/tombaker/mklists) that I used for this test (and note its [pyproject.toml](https://github.com/tombaker/mklists/blob/master/pyproject.toml).\r\n2. Create a virtual environment, e.g.: `python3 -m venv .venv`\r\n3. Activate the virtual environment, e.g.: `source .venv/bin/activate` (MacOS) or `. .venv/bin/activate` (Ubuntu)\r\n4. Install `flit`: `python3 -m pip install --upgrade flit`\r\n5. Run `flit install -s` to install the local code as \"editable\".\r\n6. Run `python3 -m pip freeze`.\r\n7. 
Instead of displaying a list of installed modules, an exception is raised (as described above).\r\n\r\n**Output (MacOS)**\r\n\r\n```\r\n577 [master] ~/github/tombaker/mklists> python3 -m venv .venv\r\n\r\n578 [master] ~/github/tombaker/mklists> source .venv/bin/activate\r\n\r\n(.venv) 579 [master] ~/github/tombaker/mklists> python3 -m pip install --upgrade flit\r\nCollecting flit\r\n Using cached flit-3.0.0-py3-none-any.whl (48 kB)\r\n... etc ...\r\nSuccessfully installed certifi-2020.6.20 chardet-3.0.4 docutils-0.16 flit-3.0.0 flit-core-3.0.0 idna-2.10 pytoml-0.1.21 requests-2.24.0 urllib3-1.25.10\r\nWARNING: You are using pip version 20.2.1; however, version 20.2.3 is available.\r\nYou should consider upgrading via the '/Users/tbaker/github/tombaker/mklists/.venv/bin/python3 -m pip install --upgrade pip' command.\r\n\r\n(.venv) 580 [master] ~/github/tombaker/mklists> python3 -m pip install --upgrade pip\r\n...\r\nSuccessfully installed pip-20.2.3\r\n\r\n(.venv) 581 [master] ~/github/tombaker/mklists> flit install -s\r\nExtras to install for deps 'all': {'test', 'doc', '.none'} I-flit.install\r\nInstalling requirements I-flit.install\r\nCollecting pytest\r\n Using cached pytest-6.1.1-py3-none-any.whl (272 kB)\r\n... etc ...\r\nSuccessfully installed Jinja2-2.11.2 MarkupSafe-1.1.1 ... etc ...\r\nSymlinking mklists -> /Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/mklists I-flit.install\r\nWriting script to /Users/tbaker/github/tombaker/mklists/.venv/bin/mklists I-flit.install\r\n\r\n(.venv) 582 [master] ~/github/tombaker/mklists> python3 -m pip freeze\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/cli/base_command.py\", line 228, in _main\r\n status = self.run(options, args)\r\n File \"/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/commands/freeze.py\", line 101, in run\r\n for line in freeze(**freeze_kwargs):\r\n File \"/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py\", line 67, in freeze\r\n req = FrozenRequirement.from_dist(dist)\r\n File \"/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py\", line 257, in from_dist\r\n req = direct_url_as_pep440_direct_reference(\r\n File \"/Users/tbaker/github/tombaker/mklists/.venv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py\", line 49, in direct_url_as_pep440_direct_reference\r\n assert not direct_url.info.editable\r\nAssertionError\r\n```\r\n\r\n**Note** \r\n\r\nOn 13 October, I described this issue on [Stackoverflow](https://stackoverflow.com/questions/64339735/python-virtual-environments-created-since-mid-august-raise-assertionerror-for)\n", "before_files": [{"content": "import logging\n\nfrom pip._internal.models.direct_url import (\n DIRECT_URL_METADATA_NAME,\n ArchiveInfo,\n DirectUrl,\n DirectUrlValidationError,\n DirInfo,\n VcsInfo,\n)\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.vcs import vcs\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n # PY2\n JSONDecodeError = ValueError # type: ignore\n\nif MYPY_CHECK_RUNNING:\n from typing import Optional\n\n from pip._vendor.pkg_resources import Distribution\n\n from pip._internal.models.link import Link\n\nlogger = logging.getLogger(__name__)\n\n\ndef direct_url_as_pep440_direct_reference(direct_url, name):\n # type: (DirectUrl, str) -> 
str\n \"\"\"Convert a DirectUrl to a pip requirement string.\"\"\"\n direct_url.validate() # if invalid, this is a pip bug\n requirement = name + \" @ \"\n fragments = []\n if isinstance(direct_url.info, VcsInfo):\n requirement += \"{}+{}@{}\".format(\n direct_url.info.vcs, direct_url.url, direct_url.info.commit_id\n )\n elif isinstance(direct_url.info, ArchiveInfo):\n requirement += direct_url.url\n if direct_url.info.hash:\n fragments.append(direct_url.info.hash)\n else:\n assert isinstance(direct_url.info, DirInfo)\n # pip should never reach this point for editables, since\n # pip freeze inspects the editable project location to produce\n # the requirement string\n assert not direct_url.info.editable\n requirement += direct_url.url\n if direct_url.subdirectory:\n fragments.append(\"subdirectory=\" + direct_url.subdirectory)\n if fragments:\n requirement += \"#\" + \"&\".join(fragments)\n return requirement\n\n\ndef direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False):\n # type: (Link, Optional[str], bool) -> DirectUrl\n if link.is_vcs:\n vcs_backend = vcs.get_backend_for_scheme(link.scheme)\n assert vcs_backend\n url, requested_revision, _ = (\n vcs_backend.get_url_rev_and_auth(link.url_without_fragment)\n )\n # For VCS links, we need to find out and add commit_id.\n if link_is_in_wheel_cache:\n # If the requested VCS link corresponds to a cached\n # wheel, it means the requested revision was an\n # immutable commit hash, otherwise it would not have\n # been cached. In that case we don't have a source_dir\n # with the VCS checkout.\n assert requested_revision\n commit_id = requested_revision\n else:\n # If the wheel was not in cache, it means we have\n # had to checkout from VCS to build and we have a source_dir\n # which we can inspect to find out the commit id.\n assert source_dir\n commit_id = vcs_backend.get_revision(source_dir)\n return DirectUrl(\n url=url,\n info=VcsInfo(\n vcs=vcs_backend.name,\n commit_id=commit_id,\n requested_revision=requested_revision,\n ),\n subdirectory=link.subdirectory_fragment,\n )\n elif link.is_existing_dir():\n return DirectUrl(\n url=link.url_without_fragment,\n info=DirInfo(),\n subdirectory=link.subdirectory_fragment,\n )\n else:\n hash = None\n hash_name = link.hash_name\n if hash_name:\n hash = \"{}={}\".format(hash_name, link.hash)\n return DirectUrl(\n url=link.url_without_fragment,\n info=ArchiveInfo(hash=hash),\n subdirectory=link.subdirectory_fragment,\n )\n\n\ndef dist_get_direct_url(dist):\n # type: (Distribution) -> Optional[DirectUrl]\n \"\"\"Obtain a DirectUrl from a pkg_resource.Distribution.\n\n Returns None if the distribution has no `direct_url.json` metadata,\n or if `direct_url.json` is invalid.\n \"\"\"\n if not dist.has_metadata(DIRECT_URL_METADATA_NAME):\n return None\n try:\n return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))\n except (\n DirectUrlValidationError,\n JSONDecodeError,\n UnicodeDecodeError\n ) as e:\n logger.warning(\n \"Error parsing %s for %s: %s\",\n DIRECT_URL_METADATA_NAME,\n dist.project_name,\n e,\n )\n return None\n", "path": "src/pip/_internal/utils/direct_url_helpers.py"}], "after_files": [{"content": "import logging\n\nfrom pip._internal.models.direct_url import (\n DIRECT_URL_METADATA_NAME,\n ArchiveInfo,\n DirectUrl,\n DirectUrlValidationError,\n DirInfo,\n VcsInfo,\n)\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.vcs import vcs\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n # PY2\n JSONDecodeError = 
ValueError # type: ignore\n\nif MYPY_CHECK_RUNNING:\n from typing import Optional\n\n from pip._vendor.pkg_resources import Distribution\n\n from pip._internal.models.link import Link\n\nlogger = logging.getLogger(__name__)\n\n\ndef direct_url_as_pep440_direct_reference(direct_url, name):\n # type: (DirectUrl, str) -> str\n \"\"\"Convert a DirectUrl to a pip requirement string.\"\"\"\n direct_url.validate() # if invalid, this is a pip bug\n requirement = name + \" @ \"\n fragments = []\n if isinstance(direct_url.info, VcsInfo):\n requirement += \"{}+{}@{}\".format(\n direct_url.info.vcs, direct_url.url, direct_url.info.commit_id\n )\n elif isinstance(direct_url.info, ArchiveInfo):\n requirement += direct_url.url\n if direct_url.info.hash:\n fragments.append(direct_url.info.hash)\n else:\n assert isinstance(direct_url.info, DirInfo)\n requirement += direct_url.url\n if direct_url.subdirectory:\n fragments.append(\"subdirectory=\" + direct_url.subdirectory)\n if fragments:\n requirement += \"#\" + \"&\".join(fragments)\n return requirement\n\n\ndef direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False):\n # type: (Link, Optional[str], bool) -> DirectUrl\n if link.is_vcs:\n vcs_backend = vcs.get_backend_for_scheme(link.scheme)\n assert vcs_backend\n url, requested_revision, _ = (\n vcs_backend.get_url_rev_and_auth(link.url_without_fragment)\n )\n # For VCS links, we need to find out and add commit_id.\n if link_is_in_wheel_cache:\n # If the requested VCS link corresponds to a cached\n # wheel, it means the requested revision was an\n # immutable commit hash, otherwise it would not have\n # been cached. In that case we don't have a source_dir\n # with the VCS checkout.\n assert requested_revision\n commit_id = requested_revision\n else:\n # If the wheel was not in cache, it means we have\n # had to checkout from VCS to build and we have a source_dir\n # which we can inspect to find out the commit id.\n assert source_dir\n commit_id = vcs_backend.get_revision(source_dir)\n return DirectUrl(\n url=url,\n info=VcsInfo(\n vcs=vcs_backend.name,\n commit_id=commit_id,\n requested_revision=requested_revision,\n ),\n subdirectory=link.subdirectory_fragment,\n )\n elif link.is_existing_dir():\n return DirectUrl(\n url=link.url_without_fragment,\n info=DirInfo(),\n subdirectory=link.subdirectory_fragment,\n )\n else:\n hash = None\n hash_name = link.hash_name\n if hash_name:\n hash = \"{}={}\".format(hash_name, link.hash)\n return DirectUrl(\n url=link.url_without_fragment,\n info=ArchiveInfo(hash=hash),\n subdirectory=link.subdirectory_fragment,\n )\n\n\ndef dist_get_direct_url(dist):\n # type: (Distribution) -> Optional[DirectUrl]\n \"\"\"Obtain a DirectUrl from a pkg_resource.Distribution.\n\n Returns None if the distribution has no `direct_url.json` metadata,\n or if `direct_url.json` is invalid.\n \"\"\"\n if not dist.has_metadata(DIRECT_URL_METADATA_NAME):\n return None\n try:\n return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))\n except (\n DirectUrlValidationError,\n JSONDecodeError,\n UnicodeDecodeError\n ) as e:\n logger.warning(\n \"Error parsing %s for %s: %s\",\n DIRECT_URL_METADATA_NAME,\n dist.project_name,\n e,\n )\n return None\n", "path": "src/pip/_internal/utils/direct_url_helpers.py"}]}
| 3,130 | 165 |
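The patch above removes the `assert not direct_url.info.editable` guard, so a `direct_url.json` written for an editable directory install no longer crashes `pip freeze`. A minimal sketch of the post-patch behaviour, using pip's internal models (internal, unstable API; the project path is taken from the traceback above and is purely illustrative):

```python
# Sketch only: pip._internal is an internal, unstable API.
from pip._internal.models.direct_url import DirectUrl, DirInfo
from pip._internal.utils.direct_url_helpers import (
    direct_url_as_pep440_direct_reference,
)

# Roughly what flit install -s records in direct_url.json for an editable project.
direct_url = DirectUrl(
    url="file:///Users/tbaker/github/tombaker/mklists",
    info=DirInfo(editable=True),
)

# Before the patch this call hit the AssertionError shown in the traceback;
# after it, freeze can fall back to a PEP 440 direct reference.
print(direct_url_as_pep440_direct_reference(direct_url, "mklists"))
# -> mklists @ file:///Users/tbaker/github/tombaker/mklists
```

With pip 20.2.3 as shipped, the same call raises the `AssertionError` reported in the issue.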
gh_patches_debug_2739
|
rasdani/github-patches
|
git_diff
|
translate__translate-3435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multistring needs a __hash__ method
In old ttk you could do something like
``` python
foo = multistring("foo")
foodict = {foo: "bar"}
assert 'foo' in foodict
```
It seems this no longer works - not sure why, but a `__hash__` method that returns `hash(str(self))` should fix the problem, I believe
@claudep @julen any thoughts on this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/misc/multistring.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2006 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 """Supports a hybrid Unicode string that can also have a list of alternate
21 strings in the strings attribute
22 """
23
24 import warnings
25
26 import six
27
28 from .deprecation import RemovedInTTK2Warning
29
30
31 def _create_text_type(newtype, string, encoding):
32 """Helper to construct a text type out of characters or bytes. Required to
33 temporarily preserve backwards compatibility. Must be removed in TTK2.
34 """
35 if isinstance(string, six.text_type):
36 return six.text_type.__new__(newtype, string)
37
38 warnings.warn(
39 'Passing non-ASCII bytes as well as the `encoding` argument to '
40 '`multistring` is deprecated. Always pass unicode characters instead.',
41 RemovedInTTK2Warning, stacklevel=2,
42 )
43 return six.text_type.__new__(newtype, string or six.binary_type(), encoding)
44
45
46 class multistring(six.text_type):
47
48 def __new__(newtype, string=u"", *args, **kwargs):
49 encoding = kwargs.pop('encoding', 'utf-8')
50 if isinstance(string, list):
51 if not string:
52 raise ValueError("multistring must contain at least one string")
53 newstring = _create_text_type(newtype, string[0], encoding)
54 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]
55 else:
56 newstring = _create_text_type(newtype, string, encoding)
57 newstring.strings = [newstring]
58 return newstring
59
60 def __init__(self, *args, **kwargs):
61 super(multistring, self).__init__()
62 if not hasattr(self, "strings"):
63 self.strings = []
64
65 def __cmp__(self, otherstring):
66 def cmp_compat(s1, s2):
67 # Python 3 compatible cmp() equivalent
68 return (s1 > s2) - (s1 < s2)
69 if isinstance(otherstring, multistring):
70 parentcompare = cmp_compat(six.text_type(self), otherstring)
71 if parentcompare:
72 return parentcompare
73 else:
74 return cmp_compat(self.strings[1:], otherstring.strings[1:])
75 elif isinstance(otherstring, six.text_type):
76 return cmp_compat(six.text_type(self), otherstring)
77 elif isinstance(otherstring, bytes):
78 return cmp_compat(self.encode('utf-8'), otherstring)
79 elif isinstance(otherstring, list) and otherstring:
80 return cmp_compat(self, multistring(otherstring))
81 else:
82 return cmp_compat(str(type(self)), str(type(otherstring)))
83
84 def __hash__(self):
85 return hash(''.join(self.strings))
86
87 def __ne__(self, otherstring):
88 return self.__cmp__(otherstring) != 0
89
90 def __eq__(self, otherstring):
91 return self.__cmp__(otherstring) == 0
92
93 def __repr__(self):
94 _repr = u"multistring(%r)" % (
95 [six.text_type(item) for item in self.strings]
96 )
97 return _repr.encode('utf-8') if six.PY2 else _repr
98
99 def __str__(self):
100 if six.PY2:
101 return self.encode('utf-8')
102 return super(multistring, self).__str__()
103
104 def replace(self, old, new, count=None):
105 if count is None:
106 newstr = multistring(super(multistring, self).replace(old, new))
107 else:
108 newstr = multistring(super(multistring, self).replace(old, new, count))
109 for s in self.strings[1:]:
110 if count is None:
111 newstr.strings.append(s.replace(old, new))
112 else:
113 newstr.strings.append(s.replace(old, new, count))
114 return newstr
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py
--- a/translate/misc/multistring.py
+++ b/translate/misc/multistring.py
@@ -82,7 +82,7 @@
return cmp_compat(str(type(self)), str(type(otherstring)))
def __hash__(self):
- return hash(''.join(self.strings))
+ return hash(str(self))
def __ne__(self, otherstring):
return self.__cmp__(otherstring) != 0
|
{"golden_diff": "diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -82,7 +82,7 @@\n return cmp_compat(str(type(self)), str(type(otherstring)))\n \n def __hash__(self):\n- return hash(''.join(self.strings))\n+ return hash(str(self))\n \n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n", "issue": "multistring needs a __hash__ method\nIn old ttk you could do something like\n\n``` python\nfoo = multistring(\"foo\")\nfoodict = {foo: \"bar\"}\nassert 'foo' in foodict\n```\n\nIt seems this no longer works - not sure why, but a `__hash__` method that returns `hash(str(self))` should fix the problem i believe\n\n@claudep @julen any thoughts on this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport warnings\n\nimport six\n\nfrom .deprecation import RemovedInTTK2Warning\n\n\ndef _create_text_type(newtype, string, encoding):\n \"\"\"Helper to construct a text type out of characters or bytes. Required to\n temporarily preserve backwards compatibility. Must be removed in TTK2.\n \"\"\"\n if isinstance(string, six.text_type):\n return six.text_type.__new__(newtype, string)\n\n warnings.warn(\n 'Passing non-ASCII bytes as well as the `encoding` argument to '\n '`multistring` is deprecated. 
Always pass unicode characters instead.',\n RemovedInTTK2Warning, stacklevel=2,\n )\n return six.text_type.__new__(newtype, string or six.binary_type(), encoding)\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n encoding = kwargs.pop('encoding', 'utf-8')\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = _create_text_type(newtype, string[0], encoding)\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = _create_text_type(newtype, string, encoding)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n _repr = u\"multistring(%r)\" % (\n [six.text_type(item) for item in self.strings]\n )\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def __str__(self):\n if six.PY2:\n return self.encode('utf-8')\n return super(multistring, self).__str__()\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport warnings\n\nimport six\n\nfrom .deprecation import RemovedInTTK2Warning\n\n\ndef _create_text_type(newtype, string, encoding):\n \"\"\"Helper to construct a text type out of characters or bytes. Required to\n temporarily preserve backwards compatibility. Must be removed in TTK2.\n \"\"\"\n if isinstance(string, six.text_type):\n return six.text_type.__new__(newtype, string)\n\n warnings.warn(\n 'Passing non-ASCII bytes as well as the `encoding` argument to '\n '`multistring` is deprecated. Always pass unicode characters instead.',\n RemovedInTTK2Warning, stacklevel=2,\n )\n return six.text_type.__new__(newtype, string or six.binary_type(), encoding)\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n encoding = kwargs.pop('encoding', 'utf-8')\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = _create_text_type(newtype, string[0], encoding)\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = _create_text_type(newtype, string, encoding)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(str(self))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n _repr = u\"multistring(%r)\" % (\n [six.text_type(item) for item in self.strings]\n )\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def __str__(self):\n if six.PY2:\n return self.encode('utf-8')\n return super(multistring, self).__str__()\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}]}
| 1,573 | 114 |
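The fix above works because `multistring.__eq__` already treats a multistring as equal to its plain text value, and Python requires objects that compare equal to hash equally for dict and set lookups to succeed; the old `hash(''.join(self.strings))` broke that invariant as soon as there was more than one alternate string. A small sketch of the behaviour the patch restores (assuming the patched `translate.misc.multistring` is importable):

```python
# Sketch assuming the patched translate.misc.multistring is installed.
from translate.misc.multistring import multistring

foo = multistring(["foo", "foos"])

# __eq__ compares equal to the plain string, so __hash__ must agree with
# str.__hash__ of that value, i.e. hash(str(self)).
assert foo == "foo"
assert hash(foo) == hash("foo")

# The dict lookup from the issue works again once the hashes match.
foodict = {foo: "bar"}
assert "foo" in foodict
```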
gh_patches_debug_45322
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3531
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[v18] I have lost my followed topics
Version: 18
OS: Windows
Browser: Firefox
Even though a sync was done between prod and the beta, I can no longer find my followed topics on the page https://beta.zestedesavoir.com/forums/, as can be seen in the screenshot below (I only have one followed topic left instead of many more)

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/notification/management/commands/migrate_subscriptions.py`
Content:
```
1 # coding: utf-8
2 from django.core.management import BaseCommand
3 from django.db.models import F, Q
4 from zds.forum.models import TopicRead
5 from zds.member.models import Profile
6 from zds.mp.models import PrivateTopicRead, PrivateTopic
7 from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \
8 PrivateTopicAnswerSubscription
9 from zds.tutorialv2.models.models_database import ContentReaction, ContentRead
10
11
12 class Command(BaseCommand):
13 help = 'Migrate old subscriptions and notifications for new models.'
14
15 def handle(self, *args, **options):
16 for profile in Profile.objects.all():
17 self.stdout.write(u'Migrate all notifications of {}...'.format(profile.user.username))
18 # Forums.
19 self.stdout.write(u'Starting migration with topics...')
20 topics_followed = TopicFollowed.objects.filter(user=profile.user).values("topic").distinct().all()
21 topics_never_read = TopicRead.objects\
22 .filter(user=profile.user)\
23 .filter(topic__in=topics_followed)\
24 .select_related("topic")\
25 .exclude(post=F('topic__last_message')).all()
26
27 for topic_never_read in topics_never_read:
28 content = topic_never_read.topic.first_unread_post(profile.user)
29 if content is None:
30 content = topic_never_read.topic.last_message
31
32 # Migrate subscriptions.
33 content_object = topic_never_read.topic
34 subscription = TopicAnswerSubscription.objects.get_or_create_active(
35 user=profile.user, content_object=content_object)
36
37 # Migrate notifications.
38 subscription.send_notification(content=content, sender=content.author)
39 notification = TopicAnswerSubscription.objects\
40 .get_existing(profile.user, content_object, is_active=True).last_notification
41 notification.pubdate = content.pubdate
42 notification.save()
43
44 self.stdout.write(u'Migration about « {} » [OK]'.format(topic_never_read.topic.title))
45
46 # Private messages.
47 self.stdout.write(u'Starting migration with private topics...')
48 topics_never_read = list(PrivateTopicRead.objects
49 .filter(user=profile.user)
50 .filter(privatepost=F('privatetopic__last_message')).all())
51
52 tnrs = []
53 for tnr in topics_never_read:
54 tnrs.append(tnr.privatetopic.pk)
55
56 private_topics_unread = PrivateTopic.objects \
57 .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \
58 .exclude(pk__in=tnrs) \
59 .order_by("-pubdate") \
60 .distinct()
61
62 for private_topic_unread in private_topics_unread:
63 answer = private_topic_unread.last_read_post(profile.user)
64 if answer is None:
65 answer = private_topic_unread.get_last_answer()
66
67 # Migrate subscriptions.
68 subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(
69 user=profile.user, content_object=private_topic_unread)
70
71 # Migrate notifications.
72 subscription.send_notification(content=answer, sender=answer.author, send_email=False)
73 notification = PrivateTopicAnswerSubscription.objects\
74 .get_existing(profile.user, private_topic_unread, is_active=True).last_notification
75 notification.pubdate = answer.pubdate
76 notification.save()
77
78 self.stdout.write(u'Migration about « {} » [OK]'.format(private_topic_unread.title))
79
80 # Contents.
81 self.stdout.write(u'Starting migration with contents...')
82 content_followed_pk = ContentReaction.objects\
83 .filter(author=profile.user, related_content__public_version__isnull=False)\
84 .values_list('related_content__pk', flat=True)
85
86 content_to_read = ContentRead.objects\
87 .select_related('note')\
88 .select_related('note__author')\
89 .select_related('content')\
90 .select_related('note__related_content__public_version')\
91 .filter(user=profile.user)\
92 .exclude(note__pk=F('content__last_note__pk')).all()
93
94 for content_read in content_to_read:
95 content = content_read.content
96 if content.pk not in content_followed_pk and profile.user not in content.authors.all():
97 continue
98 reaction = content.first_unread_note(user=profile.user)
99 if reaction is None:
100 reaction = content.first_note()
101 if reaction is None:
102 continue
103
104 # Migrate subscriptions.
105 content_object = reaction.related_content
106 subscription = ContentReactionAnswerSubscription.objects.get_or_create_active(
107 user=profile.user, content_object=content_object)
108
109 # Migrate notifications.
110 subscription.send_notification(content=reaction, sender=reaction.author)
111 notification = ContentReactionAnswerSubscription.objects\
112 .get_existing(profile.user, content_object, is_active=True).last_notification
113 notification.pubdate = reaction.pubdate
114 notification.save()
115
116 self.stdout.write(u'Migration about « {} » [OK]'.format(content.title))
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/notification/management/commands/migrate_subscriptions.py b/zds/notification/management/commands/migrate_subscriptions.py
--- a/zds/notification/management/commands/migrate_subscriptions.py
+++ b/zds/notification/management/commands/migrate_subscriptions.py
@@ -1,12 +1,12 @@
# coding: utf-8
from django.core.management import BaseCommand
from django.db.models import F, Q
-from zds.forum.models import TopicRead
+from zds.forum.models import TopicRead, Topic
from zds.member.models import Profile
from zds.mp.models import PrivateTopicRead, PrivateTopic
from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \
PrivateTopicAnswerSubscription
-from zds.tutorialv2.models.models_database import ContentReaction, ContentRead
+from zds.tutorialv2.models.models_database import ContentReaction, ContentRead, PublishableContent
class Command(BaseCommand):
@@ -24,12 +24,17 @@
.select_related("topic")\
.exclude(post=F('topic__last_message')).all()
+ # Migrate subscriptions.
+ for topic_followed in TopicFollowed.objects \
+ .filter(user=profile.user).values_list('topic', flat=True).distinct().all():
+ content_object = Topic.objects.get(id=topic_followed)
+ TopicAnswerSubscription.objects.get_or_create_active(user=profile.user, content_object=content_object)
+
for topic_never_read in topics_never_read:
content = topic_never_read.topic.first_unread_post(profile.user)
if content is None:
content = topic_never_read.topic.last_message
- # Migrate subscriptions.
content_object = topic_never_read.topic
subscription = TopicAnswerSubscription.objects.get_or_create_active(
user=profile.user, content_object=content_object)
@@ -53,6 +58,16 @@
for tnr in topics_never_read:
tnrs.append(tnr.privatetopic.pk)
+ # Migrate subscriptions.
+ private_topics = PrivateTopic.objects \
+ .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \
+ .order_by("-pubdate") \
+ .distinct()
+
+ for private_topic in private_topics:
+ PrivateTopicAnswerSubscription.objects.get_or_create_active(
+ user=profile.user, content_object=private_topic)
+
private_topics_unread = PrivateTopic.objects \
.filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \
.exclude(pk__in=tnrs) \
@@ -64,7 +79,6 @@
if answer is None:
answer = private_topic_unread.get_last_answer()
- # Migrate subscriptions.
subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(
user=profile.user, content_object=private_topic_unread)
@@ -79,6 +93,16 @@
# Contents.
self.stdout.write(u'Starting migration with contents...')
+ # Migrate subscriptions.
+ contents_followed = ContentReaction.objects \
+ .filter(author=profile.user, related_content__public_version__isnull=False) \
+ .values_list('related_content', flat=True)
+
+ for content_followed in contents_followed:
+ content_object = PublishableContent.objects.get(id=content_followed)
+ ContentReactionAnswerSubscription.objects.get_or_create_active(
+ user=profile.user, content_object=content_object)
+
content_followed_pk = ContentReaction.objects\
.filter(author=profile.user, related_content__public_version__isnull=False)\
.values_list('related_content__pk', flat=True)
@@ -101,7 +125,6 @@
if reaction is None:
continue
- # Migrate subscriptions.
content_object = reaction.related_content
subscription = ContentReactionAnswerSubscription.objects.get_or_create_active(
user=profile.user, content_object=content_object)
|
{"golden_diff": "diff --git a/zds/notification/management/commands/migrate_subscriptions.py b/zds/notification/management/commands/migrate_subscriptions.py\n--- a/zds/notification/management/commands/migrate_subscriptions.py\n+++ b/zds/notification/management/commands/migrate_subscriptions.py\n@@ -1,12 +1,12 @@\n # coding: utf-8\n from django.core.management import BaseCommand\n from django.db.models import F, Q\n-from zds.forum.models import TopicRead\n+from zds.forum.models import TopicRead, Topic\n from zds.member.models import Profile\n from zds.mp.models import PrivateTopicRead, PrivateTopic\n from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \\\n PrivateTopicAnswerSubscription\n-from zds.tutorialv2.models.models_database import ContentReaction, ContentRead\n+from zds.tutorialv2.models.models_database import ContentReaction, ContentRead, PublishableContent\n \n \n class Command(BaseCommand):\n@@ -24,12 +24,17 @@\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message')).all()\n \n+ # Migrate subscriptions.\n+ for topic_followed in TopicFollowed.objects \\\n+ .filter(user=profile.user).values_list('topic', flat=True).distinct().all():\n+ content_object = Topic.objects.get(id=topic_followed)\n+ TopicAnswerSubscription.objects.get_or_create_active(user=profile.user, content_object=content_object)\n+\n for topic_never_read in topics_never_read:\n content = topic_never_read.topic.first_unread_post(profile.user)\n if content is None:\n content = topic_never_read.topic.last_message\n \n- # Migrate subscriptions.\n content_object = topic_never_read.topic\n subscription = TopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n@@ -53,6 +58,16 @@\n for tnr in topics_never_read:\n tnrs.append(tnr.privatetopic.pk)\n \n+ # Migrate subscriptions.\n+ private_topics = PrivateTopic.objects \\\n+ .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n+ .order_by(\"-pubdate\") \\\n+ .distinct()\n+\n+ for private_topic in private_topics:\n+ PrivateTopicAnswerSubscription.objects.get_or_create_active(\n+ user=profile.user, content_object=private_topic)\n+\n private_topics_unread = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .exclude(pk__in=tnrs) \\\n@@ -64,7 +79,6 @@\n if answer is None:\n answer = private_topic_unread.get_last_answer()\n \n- # Migrate subscriptions.\n subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic_unread)\n \n@@ -79,6 +93,16 @@\n \n # Contents.\n self.stdout.write(u'Starting migration with contents...')\n+ # Migrate subscriptions.\n+ contents_followed = ContentReaction.objects \\\n+ .filter(author=profile.user, related_content__public_version__isnull=False) \\\n+ .values_list('related_content', flat=True)\n+\n+ for content_followed in contents_followed:\n+ content_object = PublishableContent.objects.get(id=content_followed)\n+ ContentReactionAnswerSubscription.objects.get_or_create_active(\n+ user=profile.user, content_object=content_object)\n+\n content_followed_pk = ContentReaction.objects\\\n .filter(author=profile.user, related_content__public_version__isnull=False)\\\n .values_list('related_content__pk', flat=True)\n@@ -101,7 +125,6 @@\n if reaction is None:\n continue\n \n- # Migrate subscriptions.\n content_object = reaction.related_content\n subscription = 
ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n", "issue": "[v18] J'ai perdu mes sujets suivis\nVersion : 18\nOS Windows\nNavigateur Firefox\n\nAlors qu'une synchro a \u00e9t\u00e9 faite entre la prod et la beta, je ne retrouve plus mes sujets suivis dans la page https://beta.zestedesavoir.com/forums/ comme on peut le voir sur la capture ci-dessous (il ne me reste plus qu'un sujet suivis au lieu de beaucoup plus)\n\n\n\n", "before_files": [{"content": "# coding: utf-8\nfrom django.core.management import BaseCommand\nfrom django.db.models import F, Q\nfrom zds.forum.models import TopicRead\nfrom zds.member.models import Profile\nfrom zds.mp.models import PrivateTopicRead, PrivateTopic\nfrom zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \\\n PrivateTopicAnswerSubscription\nfrom zds.tutorialv2.models.models_database import ContentReaction, ContentRead\n\n\nclass Command(BaseCommand):\n help = 'Migrate old subscriptions and notifications for new models.'\n\n def handle(self, *args, **options):\n for profile in Profile.objects.all():\n self.stdout.write(u'Migrate all notifications of {}...'.format(profile.user.username))\n # Forums.\n self.stdout.write(u'Starting migration with topics...')\n topics_followed = TopicFollowed.objects.filter(user=profile.user).values(\"topic\").distinct().all()\n topics_never_read = TopicRead.objects\\\n .filter(user=profile.user)\\\n .filter(topic__in=topics_followed)\\\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message')).all()\n\n for topic_never_read in topics_never_read:\n content = topic_never_read.topic.first_unread_post(profile.user)\n if content is None:\n content = topic_never_read.topic.last_message\n\n # Migrate subscriptions.\n content_object = topic_never_read.topic\n subscription = TopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n subscription.send_notification(content=content, sender=content.author)\n notification = TopicAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = content.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(topic_never_read.topic.title))\n\n # Private messages.\n self.stdout.write(u'Starting migration with private topics...')\n topics_never_read = list(PrivateTopicRead.objects\n .filter(user=profile.user)\n .filter(privatepost=F('privatetopic__last_message')).all())\n\n tnrs = []\n for tnr in topics_never_read:\n tnrs.append(tnr.privatetopic.pk)\n\n private_topics_unread = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .exclude(pk__in=tnrs) \\\n .order_by(\"-pubdate\") \\\n .distinct()\n\n for private_topic_unread in private_topics_unread:\n answer = private_topic_unread.last_read_post(profile.user)\n if answer is None:\n answer = private_topic_unread.get_last_answer()\n\n # Migrate subscriptions.\n subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic_unread)\n\n # Migrate notifications.\n subscription.send_notification(content=answer, sender=answer.author, send_email=False)\n notification = PrivateTopicAnswerSubscription.objects\\\n .get_existing(profile.user, private_topic_unread, is_active=True).last_notification\n notification.pubdate = 
answer.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(private_topic_unread.title))\n\n # Contents.\n self.stdout.write(u'Starting migration with contents...')\n content_followed_pk = ContentReaction.objects\\\n .filter(author=profile.user, related_content__public_version__isnull=False)\\\n .values_list('related_content__pk', flat=True)\n\n content_to_read = ContentRead.objects\\\n .select_related('note')\\\n .select_related('note__author')\\\n .select_related('content')\\\n .select_related('note__related_content__public_version')\\\n .filter(user=profile.user)\\\n .exclude(note__pk=F('content__last_note__pk')).all()\n\n for content_read in content_to_read:\n content = content_read.content\n if content.pk not in content_followed_pk and profile.user not in content.authors.all():\n continue\n reaction = content.first_unread_note(user=profile.user)\n if reaction is None:\n reaction = content.first_note()\n if reaction is None:\n continue\n\n # Migrate subscriptions.\n content_object = reaction.related_content\n subscription = ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n subscription.send_notification(content=reaction, sender=reaction.author)\n notification = ContentReactionAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = reaction.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(content.title))\n", "path": "zds/notification/management/commands/migrate_subscriptions.py"}], "after_files": [{"content": "# coding: utf-8\nfrom django.core.management import BaseCommand\nfrom django.db.models import F, Q\nfrom zds.forum.models import TopicRead, Topic\nfrom zds.member.models import Profile\nfrom zds.mp.models import PrivateTopicRead, PrivateTopic\nfrom zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \\\n PrivateTopicAnswerSubscription\nfrom zds.tutorialv2.models.models_database import ContentReaction, ContentRead, PublishableContent\n\n\nclass Command(BaseCommand):\n help = 'Migrate old subscriptions and notifications for new models.'\n\n def handle(self, *args, **options):\n for profile in Profile.objects.all():\n self.stdout.write(u'Migrate all notifications of {}...'.format(profile.user.username))\n # Forums.\n self.stdout.write(u'Starting migration with topics...')\n topics_followed = TopicFollowed.objects.filter(user=profile.user).values(\"topic\").distinct().all()\n topics_never_read = TopicRead.objects\\\n .filter(user=profile.user)\\\n .filter(topic__in=topics_followed)\\\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message')).all()\n\n # Migrate subscriptions.\n for topic_followed in TopicFollowed.objects \\\n .filter(user=profile.user).values_list('topic', flat=True).distinct().all():\n content_object = Topic.objects.get(id=topic_followed)\n TopicAnswerSubscription.objects.get_or_create_active(user=profile.user, content_object=content_object)\n\n for topic_never_read in topics_never_read:\n content = topic_never_read.topic.first_unread_post(profile.user)\n if content is None:\n content = topic_never_read.topic.last_message\n\n content_object = topic_never_read.topic\n subscription = TopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n 
subscription.send_notification(content=content, sender=content.author)\n notification = TopicAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = content.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(topic_never_read.topic.title))\n\n # Private messages.\n self.stdout.write(u'Starting migration with private topics...')\n topics_never_read = list(PrivateTopicRead.objects\n .filter(user=profile.user)\n .filter(privatepost=F('privatetopic__last_message')).all())\n\n tnrs = []\n for tnr in topics_never_read:\n tnrs.append(tnr.privatetopic.pk)\n\n # Migrate subscriptions.\n private_topics = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .order_by(\"-pubdate\") \\\n .distinct()\n\n for private_topic in private_topics:\n PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic)\n\n private_topics_unread = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .exclude(pk__in=tnrs) \\\n .order_by(\"-pubdate\") \\\n .distinct()\n\n for private_topic_unread in private_topics_unread:\n answer = private_topic_unread.last_read_post(profile.user)\n if answer is None:\n answer = private_topic_unread.get_last_answer()\n\n subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic_unread)\n\n # Migrate notifications.\n subscription.send_notification(content=answer, sender=answer.author, send_email=False)\n notification = PrivateTopicAnswerSubscription.objects\\\n .get_existing(profile.user, private_topic_unread, is_active=True).last_notification\n notification.pubdate = answer.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(private_topic_unread.title))\n\n # Contents.\n self.stdout.write(u'Starting migration with contents...')\n # Migrate subscriptions.\n contents_followed = ContentReaction.objects \\\n .filter(author=profile.user, related_content__public_version__isnull=False) \\\n .values_list('related_content', flat=True)\n\n for content_followed in contents_followed:\n content_object = PublishableContent.objects.get(id=content_followed)\n ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n content_followed_pk = ContentReaction.objects\\\n .filter(author=profile.user, related_content__public_version__isnull=False)\\\n .values_list('related_content__pk', flat=True)\n\n content_to_read = ContentRead.objects\\\n .select_related('note')\\\n .select_related('note__author')\\\n .select_related('content')\\\n .select_related('note__related_content__public_version')\\\n .filter(user=profile.user)\\\n .exclude(note__pk=F('content__last_note__pk')).all()\n\n for content_read in content_to_read:\n content = content_read.content\n if content.pk not in content_followed_pk and profile.user not in content.authors.all():\n continue\n reaction = content.first_unread_note(user=profile.user)\n if reaction is None:\n reaction = content.first_note()\n if reaction is None:\n continue\n\n content_object = reaction.related_content\n subscription = ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n subscription.send_notification(content=reaction, 
sender=reaction.author)\n notification = ContentReactionAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = reaction.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(content.title))\n", "path": "zds/notification/management/commands/migrate_subscriptions.py"}]}
| 1,718 | 876 |
gh_patches_debug_33383
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-1064
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AWQ does not support Turing GPUs
@WoosukKwon Doesn't it support Turing arch? my GPU's compute capability is 7.5. CUDA-12.1.
build Error message:
`ptxas /tmp/tmpxft_0006e7c4_00000000-6_gemm_kernels.ptx, line 928; error : Feature '.m16n8k16' requires .target sm_80 or higher`
If not, hope can add backward compatibility for kernel build.
_Originally posted by @esmeetu in https://github.com/vllm-project/vllm/issues/1032#issuecomment-1722179620_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/model_executor/quantization_utils/awq.py`
Content:
```
1 from typing import Any, Dict, List
2
3 import torch
4
5 from vllm.model_executor.quantization_utils.base import QuantizationConfig
6
7
8 class AWQConfig(QuantizationConfig):
9 """Config class for AWQ.
10
11 Reference: https://arxiv.org/abs/2306.00978
12 """
13
14 def __init__(
15 self,
16 weight_bits: int,
17 group_size: int,
18 zero_point: bool,
19 ) -> None:
20 self.weight_bits = weight_bits
21 self.group_size = group_size
22 self.zero_point = zero_point
23
24 if self.weight_bits != 4:
25 raise ValueError(
26 "Currently, only 4-bit weight quantization is supported for "
27 f"AWQ, but got {self.weight_bits} bits.")
28 self.pack_factor = 32 // self.weight_bits
29
30 def __repr__(self) -> str:
31 return (f"AWQConfig(weight_bits={self.weight_bits}, "
32 f"group_size={self.group_size}, "
33 f"zero_point={self.zero_point})")
34
35 @classmethod
36 def get_name(cls) -> str:
37 return "awq"
38
39 @classmethod
40 def get_supported_act_dtypes(cls) -> List[torch.dtype]:
41 return [torch.half]
42
43 @classmethod
44 def get_config_filenames(cls) -> List[str]:
45 return [
46 "quant_config.json", # E.g., casperhansen/vicuna-7b-v1.5-awq
47 "quantize_config.json", # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq # pylint: disable=line-too-long
48 ]
49
50 @classmethod
51 def from_config(cls, config: Dict[str, Any]) -> "AWQConfig":
52 weight_bits = cls.get_from_keys(config, ["w_bit", "bits"])
53 group_size = cls.get_from_keys(config, ["q_group_size", "group_size"])
54 zero_point = cls.get_from_keys(config, ["zero_point"])
55 return cls(weight_bits, group_size, zero_point)
56
57 @classmethod
58 def get_packed_tensor_names(cls) -> List[str]:
59 return ["qweight", "qzeros"]
60
61 @classmethod
62 def get_transposed_tensor_names(cls) -> List[str]:
63 return ["qweight", "qzeros", "scales"]
64
65 @classmethod
66 def get_tp_tensor_names(cls) -> List[str]:
67 return ["qweight", "qzeros", "scales"]
68
```
Path: `vllm/model_executor/quantization_utils/base.py`
Content:
```
1 from typing import Any, Dict, List
2
3 import torch
4
5
6 class QuantizationConfig:
7
8 @classmethod
9 def get_name(cls) -> str:
10 """Name of the quantization method."""
11 raise NotImplementedError
12
13 @classmethod
14 def get_supported_act_dtypes(cls) -> List[torch.dtype]:
15 """List of supported activation dtypes."""
16 raise NotImplementedError
17
18 @classmethod
19 def get_config_filenames(cls) -> List[str]:
20 """List of filenames to search for in the model directory."""
21 raise NotImplementedError
22
23 @classmethod
24 def from_config(cls, config: Dict[str, Any]) -> "QuantizationConfig":
25 """Create a config class from the model's quantization config."""
26 raise NotImplementedError
27
28 @staticmethod
29 def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:
30 """Get a value from the model's quantization config."""
31 for key in keys:
32 if key in config:
33 return config[key]
34 raise ValueError(f"Cannot find any of {keys} in the model's "
35 "quantization config.")
36
37 @classmethod
38 def get_packed_tensor_names(cls) -> List[str]:
39 raise NotImplementedError
40
41 @classmethod
42 def is_packed(cls, tensor_name: str) -> bool:
43 """Returns True if a tensor is packed.
44
45 A tensor is considered packed if each element in the tensor is a
46 packed representation of multiple elements in the original tensor.
47 For example, an INT32 element in the tensor may represent 8 INT4
48 elements in the original tensor.
49 """
50 return any(tag in tensor_name for tag in cls.get_packed_tensor_names())
51
52 @classmethod
53 def get_transposed_tensor_names(cls) -> List[str]:
54 raise NotImplementedError
55
56 @classmethod
57 def is_transposed(cls, tensor_name: str) -> bool:
58 """Returns True if a tensor is transposed relative to nn.Linear.weight.
59 """
60 return any(tag in tensor_name
61 for tag in cls.get_transposed_tensor_names())
62
63 @classmethod
64 def get_tp_tensor_names(cls) -> List[str]:
65 raise NotImplementedError
66
```
Path: `vllm/model_executor/model_loader.py`
Content:
```
1 """Utilities for selecting and loading models."""
2 import contextlib
3 from typing import Type
4
5 import torch
6 import torch.nn as nn
7 from transformers import PretrainedConfig
8
9 from vllm.config import ModelConfig
10 from vllm.model_executor.models import * # pylint: disable=wildcard-import
11 from vllm.model_executor.weight_utils import (get_quant_config,
12 initialize_dummy_weights)
13
14 # TODO(woosuk): Lazy-load the model classes.
15 _MODEL_REGISTRY = {
16 "AquilaModel": AquilaForCausalLM,
17 "BaiChuanForCausalLM": BaiChuanForCausalLM, # baichuan-7b
18 "BaichuanForCausalLM": BaichuanForCausalLM, # baichuan-13b
19 "BloomForCausalLM": BloomForCausalLM,
20 "FalconForCausalLM": FalconForCausalLM,
21 "GPT2LMHeadModel": GPT2LMHeadModel,
22 "GPTBigCodeForCausalLM": GPTBigCodeForCausalLM,
23 "GPTJForCausalLM": GPTJForCausalLM,
24 "GPTNeoXForCausalLM": GPTNeoXForCausalLM,
25 "InternLMForCausalLM": InternLMForCausalLM,
26 "LlamaForCausalLM": LlamaForCausalLM,
27 "LLaMAForCausalLM": LlamaForCausalLM, # For decapoda-research/llama-*
28 "MPTForCausalLM": MPTForCausalLM,
29 "OPTForCausalLM": OPTForCausalLM,
30 "QWenLMHeadModel": QWenLMHeadModel,
31 "RWForCausalLM": FalconForCausalLM,
32 }
33
34 # FIXME(woosuk): Remove this once all models support quantization.
35 _MODEL_CLASSES_SUPPORT_QUANTIZATION = [
36 LlamaForCausalLM,
37 ]
38
39
40 @contextlib.contextmanager
41 def _set_default_torch_dtype(dtype: torch.dtype):
42 """Sets the default torch dtype to the given dtype."""
43 old_dtype = torch.get_default_dtype()
44 torch.set_default_dtype(dtype)
45 yield
46 torch.set_default_dtype(old_dtype)
47
48
49 def _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]:
50 architectures = getattr(config, "architectures", [])
51 for arch in architectures:
52 if arch in _MODEL_REGISTRY:
53 return _MODEL_REGISTRY[arch]
54 raise ValueError(
55 f"Model architectures {architectures} are not supported for now. "
56 f"Supported architectures: {list(_MODEL_REGISTRY.keys())}")
57
58
59 def get_model(model_config: ModelConfig) -> nn.Module:
60 model_class = _get_model_architecture(model_config.hf_config)
61
62 # Get the quantization config.
63 quant_config = None
64 if model_config.quantization is not None:
65 if model_class not in _MODEL_CLASSES_SUPPORT_QUANTIZATION:
66 raise ValueError(
67 f"Quantization is not supported for {model_class}.")
68 quant_config = get_quant_config(model_config.quantization,
69 model_config.model,
70 model_config.download_dir)
71 supported_dtypes = quant_config.get_supported_act_dtypes()
72 if model_config.dtype not in supported_dtypes:
73 raise ValueError(
74 f"{model_config.dtype} is not supported for quantization "
75 f"method {model_config.quantization}. Supported dtypes: "
76 f"{supported_dtypes}")
77
78 with _set_default_torch_dtype(model_config.dtype):
79 # Create a model instance.
80 # The weights will be initialized as empty tensors.
81 if model_class in _MODEL_CLASSES_SUPPORT_QUANTIZATION:
82 model = model_class(model_config.hf_config, quant_config)
83 else:
84 model = model_class(model_config.hf_config)
85 if model_config.load_format == "dummy":
86 model = model.cuda()
87 # NOTE(woosuk): For accurate performance evaluation, we assign
88 # random values to the weights.
89 initialize_dummy_weights(model)
90 else:
91 # Load the weights from the cached or downloaded files.
92 model.load_weights(model_config.model, model_config.download_dir,
93 model_config.load_format, model_config.revision)
94 model = model.cuda()
95 return model.eval()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py
--- a/vllm/model_executor/model_loader.py
+++ b/vllm/model_executor/model_loader.py
@@ -68,6 +68,14 @@
quant_config = get_quant_config(model_config.quantization,
model_config.model,
model_config.download_dir)
+ capability = torch.cuda.get_device_capability()
+ capability = capability[0] * 10 + capability[1]
+ if capability < quant_config.get_min_capability():
+ raise ValueError(
+ f"The quantization method {model_config.quantization} is not "
+ "supported for the current GPU. "
+ f"Minimum capability: {quant_config.get_min_capability()}. "
+ f"Current capability: {capability}.")
supported_dtypes = quant_config.get_supported_act_dtypes()
if model_config.dtype not in supported_dtypes:
raise ValueError(
diff --git a/vllm/model_executor/quantization_utils/awq.py b/vllm/model_executor/quantization_utils/awq.py
--- a/vllm/model_executor/quantization_utils/awq.py
+++ b/vllm/model_executor/quantization_utils/awq.py
@@ -40,6 +40,11 @@
def get_supported_act_dtypes(cls) -> List[torch.dtype]:
return [torch.half]
+ @classmethod
+ def get_min_capability(cls) -> int:
+ # The AWQ kernel only supports Ampere or newer GPUs.
+ return 80
+
@classmethod
def get_config_filenames(cls) -> List[str]:
return [
diff --git a/vllm/model_executor/quantization_utils/base.py b/vllm/model_executor/quantization_utils/base.py
--- a/vllm/model_executor/quantization_utils/base.py
+++ b/vllm/model_executor/quantization_utils/base.py
@@ -15,6 +15,16 @@
"""List of supported activation dtypes."""
raise NotImplementedError
+ @classmethod
+ def get_min_capability(cls) -> int:
+ """Minimum GPU capability to support the quantization method.
+
+ E.g., 70 for Volta, 75 for Turing, 80 for Ampere.
+ This requirement is due to the custom CUDA kernels used by the
+ quantization method.
+ """
+ raise NotImplementedError
+
@classmethod
def get_config_filenames(cls) -> List[str]:
"""List of filenames to search for in the model directory."""
|
{"golden_diff": "diff --git a/vllm/model_executor/model_loader.py b/vllm/model_executor/model_loader.py\n--- a/vllm/model_executor/model_loader.py\n+++ b/vllm/model_executor/model_loader.py\n@@ -68,6 +68,14 @@\n quant_config = get_quant_config(model_config.quantization,\n model_config.model,\n model_config.download_dir)\n+ capability = torch.cuda.get_device_capability()\n+ capability = capability[0] * 10 + capability[1]\n+ if capability < quant_config.get_min_capability():\n+ raise ValueError(\n+ f\"The quantization method {model_config.quantization} is not \"\n+ \"supported for the current GPU. \"\n+ f\"Minimum capability: {quant_config.get_min_capability()}. \"\n+ f\"Current capability: {capability}.\")\n supported_dtypes = quant_config.get_supported_act_dtypes()\n if model_config.dtype not in supported_dtypes:\n raise ValueError(\ndiff --git a/vllm/model_executor/quantization_utils/awq.py b/vllm/model_executor/quantization_utils/awq.py\n--- a/vllm/model_executor/quantization_utils/awq.py\n+++ b/vllm/model_executor/quantization_utils/awq.py\n@@ -40,6 +40,11 @@\n def get_supported_act_dtypes(cls) -> List[torch.dtype]:\n return [torch.half]\n \n+ @classmethod\n+ def get_min_capability(cls) -> int:\n+ # The AWQ kernel only supports Ampere or newer GPUs.\n+ return 80\n+\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n return [\ndiff --git a/vllm/model_executor/quantization_utils/base.py b/vllm/model_executor/quantization_utils/base.py\n--- a/vllm/model_executor/quantization_utils/base.py\n+++ b/vllm/model_executor/quantization_utils/base.py\n@@ -15,6 +15,16 @@\n \"\"\"List of supported activation dtypes.\"\"\"\n raise NotImplementedError\n \n+ @classmethod\n+ def get_min_capability(cls) -> int:\n+ \"\"\"Minimum GPU capability to support the quantization method.\n+\n+ E.g., 70 for Volta, 75 for Turing, 80 for Ampere.\n+ This requirement is due to the custom CUDA kernels used by the\n+ quantization method.\n+ \"\"\"\n+ raise NotImplementedError\n+\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n \"\"\"List of filenames to search for in the model directory.\"\"\"\n", "issue": "AWQ does not support Turing GPUs\n @WoosukKwon Doesn't it support Turing arch? my GPU's compute capabitlity is 7.5. 
CUDA-12.1.\r\n\r\nbuild Error message:\r\n\r\n `ptxas /tmp/tmpxft_0006e7c4_00000000-6_gemm_kernels.ptx, line 928; error : Feature '.m16n8k16' requires .target sm_80 or higher`\r\n\r\nIf not, hope can add backward compatibility for kernel build.\r\n\r\n_Originally posted by @esmeetu in https://github.com/vllm-project/vllm/issues/1032#issuecomment-1722179620_\r\n \n", "before_files": [{"content": "from typing import Any, Dict, List\n\nimport torch\n\nfrom vllm.model_executor.quantization_utils.base import QuantizationConfig\n\n\nclass AWQConfig(QuantizationConfig):\n \"\"\"Config class for AWQ.\n\n Reference: https://arxiv.org/abs/2306.00978\n \"\"\"\n\n def __init__(\n self,\n weight_bits: int,\n group_size: int,\n zero_point: bool,\n ) -> None:\n self.weight_bits = weight_bits\n self.group_size = group_size\n self.zero_point = zero_point\n\n if self.weight_bits != 4:\n raise ValueError(\n \"Currently, only 4-bit weight quantization is supported for \"\n f\"AWQ, but got {self.weight_bits} bits.\")\n self.pack_factor = 32 // self.weight_bits\n\n def __repr__(self) -> str:\n return (f\"AWQConfig(weight_bits={self.weight_bits}, \"\n f\"group_size={self.group_size}, \"\n f\"zero_point={self.zero_point})\")\n\n @classmethod\n def get_name(cls) -> str:\n return \"awq\"\n\n @classmethod\n def get_supported_act_dtypes(cls) -> List[torch.dtype]:\n return [torch.half]\n\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n return [\n \"quant_config.json\", # E.g., casperhansen/vicuna-7b-v1.5-awq\n \"quantize_config.json\", # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq # pylint: disable=line-too-long\n ]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"AWQConfig\":\n weight_bits = cls.get_from_keys(config, [\"w_bit\", \"bits\"])\n group_size = cls.get_from_keys(config, [\"q_group_size\", \"group_size\"])\n zero_point = cls.get_from_keys(config, [\"zero_point\"])\n return cls(weight_bits, group_size, zero_point)\n\n @classmethod\n def get_packed_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\"]\n\n @classmethod\n def get_transposed_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\", \"scales\"]\n\n @classmethod\n def get_tp_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\", \"scales\"]\n", "path": "vllm/model_executor/quantization_utils/awq.py"}, {"content": "from typing import Any, Dict, List\n\nimport torch\n\n\nclass QuantizationConfig:\n\n @classmethod\n def get_name(cls) -> str:\n \"\"\"Name of the quantization method.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_supported_act_dtypes(cls) -> List[torch.dtype]:\n \"\"\"List of supported activation dtypes.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n \"\"\"List of filenames to search for in the model directory.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"QuantizationConfig\":\n \"\"\"Create a config class from the model's quantization config.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:\n \"\"\"Get a value from the model's quantization config.\"\"\"\n for key in keys:\n if key in config:\n return config[key]\n raise ValueError(f\"Cannot find any of {keys} in the model's \"\n \"quantization config.\")\n\n @classmethod\n def get_packed_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n\n @classmethod\n def is_packed(cls, tensor_name: str) -> bool:\n 
\"\"\"Returns True if a tensor is packed.\n\n A tensor is considered packed if each element in the tensor is a\n packed representation of multiple elements in the original tensor.\n For example, an INT32 element in the tensor may represent 8 INT4\n elements in the original tensor.\n \"\"\"\n return any(tag in tensor_name for tag in cls.get_packed_tensor_names())\n\n @classmethod\n def get_transposed_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n\n @classmethod\n def is_transposed(cls, tensor_name: str) -> bool:\n \"\"\"Returns True if a tensor is transposed relative to nn.Linear.weight.\n \"\"\"\n return any(tag in tensor_name\n for tag in cls.get_transposed_tensor_names())\n\n @classmethod\n def get_tp_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n", "path": "vllm/model_executor/quantization_utils/base.py"}, {"content": "\"\"\"Utilities for selecting and loading models.\"\"\"\nimport contextlib\nfrom typing import Type\n\nimport torch\nimport torch.nn as nn\nfrom transformers import PretrainedConfig\n\nfrom vllm.config import ModelConfig\nfrom vllm.model_executor.models import * # pylint: disable=wildcard-import\nfrom vllm.model_executor.weight_utils import (get_quant_config,\n initialize_dummy_weights)\n\n# TODO(woosuk): Lazy-load the model classes.\n_MODEL_REGISTRY = {\n \"AquilaModel\": AquilaForCausalLM,\n \"BaiChuanForCausalLM\": BaiChuanForCausalLM, # baichuan-7b\n \"BaichuanForCausalLM\": BaichuanForCausalLM, # baichuan-13b\n \"BloomForCausalLM\": BloomForCausalLM,\n \"FalconForCausalLM\": FalconForCausalLM,\n \"GPT2LMHeadModel\": GPT2LMHeadModel,\n \"GPTBigCodeForCausalLM\": GPTBigCodeForCausalLM,\n \"GPTJForCausalLM\": GPTJForCausalLM,\n \"GPTNeoXForCausalLM\": GPTNeoXForCausalLM,\n \"InternLMForCausalLM\": InternLMForCausalLM,\n \"LlamaForCausalLM\": LlamaForCausalLM,\n \"LLaMAForCausalLM\": LlamaForCausalLM, # For decapoda-research/llama-*\n \"MPTForCausalLM\": MPTForCausalLM,\n \"OPTForCausalLM\": OPTForCausalLM,\n \"QWenLMHeadModel\": QWenLMHeadModel,\n \"RWForCausalLM\": FalconForCausalLM,\n}\n\n# FIXME(woosuk): Remove this once all models support quantization.\n_MODEL_CLASSES_SUPPORT_QUANTIZATION = [\n LlamaForCausalLM,\n]\n\n\[email protected]\ndef _set_default_torch_dtype(dtype: torch.dtype):\n \"\"\"Sets the default torch dtype to the given dtype.\"\"\"\n old_dtype = torch.get_default_dtype()\n torch.set_default_dtype(dtype)\n yield\n torch.set_default_dtype(old_dtype)\n\n\ndef _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]:\n architectures = getattr(config, \"architectures\", [])\n for arch in architectures:\n if arch in _MODEL_REGISTRY:\n return _MODEL_REGISTRY[arch]\n raise ValueError(\n f\"Model architectures {architectures} are not supported for now. \"\n f\"Supported architectures: {list(_MODEL_REGISTRY.keys())}\")\n\n\ndef get_model(model_config: ModelConfig) -> nn.Module:\n model_class = _get_model_architecture(model_config.hf_config)\n\n # Get the quantization config.\n quant_config = None\n if model_config.quantization is not None:\n if model_class not in _MODEL_CLASSES_SUPPORT_QUANTIZATION:\n raise ValueError(\n f\"Quantization is not supported for {model_class}.\")\n quant_config = get_quant_config(model_config.quantization,\n model_config.model,\n model_config.download_dir)\n supported_dtypes = quant_config.get_supported_act_dtypes()\n if model_config.dtype not in supported_dtypes:\n raise ValueError(\n f\"{model_config.dtype} is not supported for quantization \"\n f\"method {model_config.quantization}. 
Supported dtypes: \"\n f\"{supported_dtypes}\")\n\n with _set_default_torch_dtype(model_config.dtype):\n # Create a model instance.\n # The weights will be initialized as empty tensors.\n if model_class in _MODEL_CLASSES_SUPPORT_QUANTIZATION:\n model = model_class(model_config.hf_config, quant_config)\n else:\n model = model_class(model_config.hf_config)\n if model_config.load_format == \"dummy\":\n model = model.cuda()\n # NOTE(woosuk): For accurate performance evaluation, we assign\n # random values to the weights.\n initialize_dummy_weights(model)\n else:\n # Load the weights from the cached or downloaded files.\n model.load_weights(model_config.model, model_config.download_dir,\n model_config.load_format, model_config.revision)\n model = model.cuda()\n return model.eval()\n", "path": "vllm/model_executor/model_loader.py"}], "after_files": [{"content": "from typing import Any, Dict, List\n\nimport torch\n\nfrom vllm.model_executor.quantization_utils.base import QuantizationConfig\n\n\nclass AWQConfig(QuantizationConfig):\n \"\"\"Config class for AWQ.\n\n Reference: https://arxiv.org/abs/2306.00978\n \"\"\"\n\n def __init__(\n self,\n weight_bits: int,\n group_size: int,\n zero_point: bool,\n ) -> None:\n self.weight_bits = weight_bits\n self.group_size = group_size\n self.zero_point = zero_point\n\n if self.weight_bits != 4:\n raise ValueError(\n \"Currently, only 4-bit weight quantization is supported for \"\n f\"AWQ, but got {self.weight_bits} bits.\")\n self.pack_factor = 32 // self.weight_bits\n\n def __repr__(self) -> str:\n return (f\"AWQConfig(weight_bits={self.weight_bits}, \"\n f\"group_size={self.group_size}, \"\n f\"zero_point={self.zero_point})\")\n\n @classmethod\n def get_name(cls) -> str:\n return \"awq\"\n\n @classmethod\n def get_supported_act_dtypes(cls) -> List[torch.dtype]:\n return [torch.half]\n\n @classmethod\n def get_min_capability(cls) -> int:\n # The AWQ kernel only supports Ampere or newer GPUs.\n return 80\n\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n return [\n \"quant_config.json\", # E.g., casperhansen/vicuna-7b-v1.5-awq\n \"quantize_config.json\", # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq # pylint: disable=line-too-long\n ]\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"AWQConfig\":\n weight_bits = cls.get_from_keys(config, [\"w_bit\", \"bits\"])\n group_size = cls.get_from_keys(config, [\"q_group_size\", \"group_size\"])\n zero_point = cls.get_from_keys(config, [\"zero_point\"])\n return cls(weight_bits, group_size, zero_point)\n\n @classmethod\n def get_packed_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\"]\n\n @classmethod\n def get_transposed_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\", \"scales\"]\n\n @classmethod\n def get_tp_tensor_names(cls) -> List[str]:\n return [\"qweight\", \"qzeros\", \"scales\"]\n", "path": "vllm/model_executor/quantization_utils/awq.py"}, {"content": "from typing import Any, Dict, List\n\nimport torch\n\n\nclass QuantizationConfig:\n\n @classmethod\n def get_name(cls) -> str:\n \"\"\"Name of the quantization method.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_supported_act_dtypes(cls) -> List[torch.dtype]:\n \"\"\"List of supported activation dtypes.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_min_capability(cls) -> int:\n \"\"\"Minimum GPU capability to support the quantization method.\n\n E.g., 70 for Volta, 75 for Turing, 80 for Ampere.\n This requirement is due to the custom CUDA 
kernels used by the\n quantization method.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_config_filenames(cls) -> List[str]:\n \"\"\"List of filenames to search for in the model directory.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"QuantizationConfig\":\n \"\"\"Create a config class from the model's quantization config.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:\n \"\"\"Get a value from the model's quantization config.\"\"\"\n for key in keys:\n if key in config:\n return config[key]\n raise ValueError(f\"Cannot find any of {keys} in the model's \"\n \"quantization config.\")\n\n @classmethod\n def get_packed_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n\n @classmethod\n def is_packed(cls, tensor_name: str) -> bool:\n \"\"\"Returns True if a tensor is packed.\n\n A tensor is considered packed if each element in the tensor is a\n packed representation of multiple elements in the original tensor.\n For example, an INT32 element in the tensor may represent 8 INT4\n elements in the original tensor.\n \"\"\"\n return any(tag in tensor_name for tag in cls.get_packed_tensor_names())\n\n @classmethod\n def get_transposed_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n\n @classmethod\n def is_transposed(cls, tensor_name: str) -> bool:\n \"\"\"Returns True if a tensor is transposed relative to nn.Linear.weight.\n \"\"\"\n return any(tag in tensor_name\n for tag in cls.get_transposed_tensor_names())\n\n @classmethod\n def get_tp_tensor_names(cls) -> List[str]:\n raise NotImplementedError\n", "path": "vllm/model_executor/quantization_utils/base.py"}, {"content": "\"\"\"Utilities for selecting and loading models.\"\"\"\nimport contextlib\nfrom typing import Type\n\nimport torch\nimport torch.nn as nn\nfrom transformers import PretrainedConfig\n\nfrom vllm.config import ModelConfig\nfrom vllm.model_executor.models import * # pylint: disable=wildcard-import\nfrom vllm.model_executor.weight_utils import (get_quant_config,\n initialize_dummy_weights)\n\n# TODO(woosuk): Lazy-load the model classes.\n_MODEL_REGISTRY = {\n \"AquilaModel\": AquilaForCausalLM,\n \"BaiChuanForCausalLM\": BaiChuanForCausalLM, # baichuan-7b\n \"BaichuanForCausalLM\": BaichuanForCausalLM, # baichuan-13b\n \"BloomForCausalLM\": BloomForCausalLM,\n \"FalconForCausalLM\": FalconForCausalLM,\n \"GPT2LMHeadModel\": GPT2LMHeadModel,\n \"GPTBigCodeForCausalLM\": GPTBigCodeForCausalLM,\n \"GPTJForCausalLM\": GPTJForCausalLM,\n \"GPTNeoXForCausalLM\": GPTNeoXForCausalLM,\n \"InternLMForCausalLM\": InternLMForCausalLM,\n \"LlamaForCausalLM\": LlamaForCausalLM,\n \"LLaMAForCausalLM\": LlamaForCausalLM, # For decapoda-research/llama-*\n \"MPTForCausalLM\": MPTForCausalLM,\n \"OPTForCausalLM\": OPTForCausalLM,\n \"QWenLMHeadModel\": QWenLMHeadModel,\n \"RWForCausalLM\": FalconForCausalLM,\n}\n\n# FIXME(woosuk): Remove this once all models support quantization.\n_MODEL_CLASSES_SUPPORT_QUANTIZATION = [\n LlamaForCausalLM,\n]\n\n\[email protected]\ndef _set_default_torch_dtype(dtype: torch.dtype):\n \"\"\"Sets the default torch dtype to the given dtype.\"\"\"\n old_dtype = torch.get_default_dtype()\n torch.set_default_dtype(dtype)\n yield\n torch.set_default_dtype(old_dtype)\n\n\ndef _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]:\n architectures = getattr(config, \"architectures\", [])\n for arch in architectures:\n if arch in 
_MODEL_REGISTRY:\n return _MODEL_REGISTRY[arch]\n raise ValueError(\n f\"Model architectures {architectures} are not supported for now. \"\n f\"Supported architectures: {list(_MODEL_REGISTRY.keys())}\")\n\n\ndef get_model(model_config: ModelConfig) -> nn.Module:\n model_class = _get_model_architecture(model_config.hf_config)\n\n # Get the quantization config.\n quant_config = None\n if model_config.quantization is not None:\n if model_class not in _MODEL_CLASSES_SUPPORT_QUANTIZATION:\n raise ValueError(\n f\"Quantization is not supported for {model_class}.\")\n quant_config = get_quant_config(model_config.quantization,\n model_config.model,\n model_config.download_dir)\n capability = torch.cuda.get_device_capability()\n capability = capability[0] * 10 + capability[1]\n if capability < quant_config.get_min_capability():\n raise ValueError(\n f\"The quantization method {model_config.quantization} is not \"\n \"supported for the current GPU. \"\n f\"Minimum capability: {quant_config.get_min_capability()}. \"\n f\"Current capability: {capability}.\")\n supported_dtypes = quant_config.get_supported_act_dtypes()\n if model_config.dtype not in supported_dtypes:\n raise ValueError(\n f\"{model_config.dtype} is not supported for quantization \"\n f\"method {model_config.quantization}. Supported dtypes: \"\n f\"{supported_dtypes}\")\n\n with _set_default_torch_dtype(model_config.dtype):\n # Create a model instance.\n # The weights will be initialized as empty tensors.\n if model_class in _MODEL_CLASSES_SUPPORT_QUANTIZATION:\n model = model_class(model_config.hf_config, quant_config)\n else:\n model = model_class(model_config.hf_config)\n if model_config.load_format == \"dummy\":\n model = model.cuda()\n # NOTE(woosuk): For accurate performance evaluation, we assign\n # random values to the weights.\n initialize_dummy_weights(model)\n else:\n # Load the weights from the cached or downloaded files.\n model.load_weights(model_config.model, model_config.download_dir,\n model_config.load_format, model_config.revision)\n model = model.cuda()\n return model.eval()\n", "path": "vllm/model_executor/model_loader.py"}]}
| 2,844 | 556 |
gh_patches_debug_14068
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3269
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OpenAPI Specification for /users/ endpoint
Generate spec for `users` endpoint
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/openapi.py`
Content:
```
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -1,9 +1,15 @@
def custom_preprocessing_hook(endpoints):
- filtered = []
- for (path, path_regex, method, callback) in endpoints:
- # Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"):
- filtered.append((path, path_regex, method, callback))
+ prefixes = [
+ "/api/db/v0/databases/",
+ "/api/db/v0/data_files/",
+ "/api/db/v0/schemas/",
+ "/api/db/v0/tables/",
+ "/api/db/v0/links/",
+ "/api/db/v0/queries/",
+ "/api/ui/v0/databases/",
+ "/api/ui/v0/users/"
+ ]
+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]
return filtered
|
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,9 +1,15 @@\n def custom_preprocessing_hook(endpoints):\n- filtered = []\n- for (path, path_regex, method, callback) in endpoints:\n- # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n- filtered.append((path, path_regex, method, callback))\n+ prefixes = [\n+ \"/api/db/v0/databases/\",\n+ \"/api/db/v0/data_files/\",\n+ \"/api/db/v0/schemas/\",\n+ \"/api/db/v0/tables/\",\n+ \"/api/db/v0/links/\",\n+ \"/api/db/v0/queries/\",\n+ \"/api/ui/v0/databases/\",\n+ \"/api/ui/v0/users/\"\n+ ]\n+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n", "issue": "Add openAPI Specification for /users/ endpoint\nGenerate spec for `users` endpoint\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n prefixes = [\n \"/api/db/v0/databases/\",\n \"/api/db/v0/data_files/\",\n \"/api/db/v0/schemas/\",\n \"/api/db/v0/tables/\",\n \"/api/db/v0/links/\",\n \"/api/db/v0/queries/\",\n \"/api/ui/v0/databases/\",\n \"/api/ui/v0/users/\"\n ]\n filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
| 539 | 271 |
gh_patches_debug_24734
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-6304
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
investigate and implement improvements to header-based tor2web detection for currently active tor2web proxies
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/main.py`
Content:
```
1 import operator
2 import os
3 import io
4
5 from base64 import urlsafe_b64encode
6 from datetime import datetime, timedelta, timezone
7 from typing import Union
8
9 import werkzeug
10 from flask import (Blueprint, render_template, flash, redirect, url_for,
11 session, current_app, request, Markup, abort, g, make_response)
12 from flask_babel import gettext
13
14 import store
15
16 from store import Storage
17
18 from db import db
19 from encryption import EncryptionManager, GpgKeyNotFoundError
20
21 from models import Submission, Reply, get_one_or_else, InstanceConfig
22 from passphrases import PassphraseGenerator, DicewarePassphrase
23 from sdconfig import SDConfig
24 from source_app.decorators import login_required
25 from source_app.session_manager import SessionManager
26 from source_app.utils import normalize_timestamps, fit_codenames_into_cookie, \
27 clear_session_and_redirect_to_logged_out_page
28 from source_app.forms import LoginForm, SubmissionForm
29 from source_user import InvalidPassphraseError, create_source_user, \
30 SourcePassphraseCollisionError, SourceDesignationCollisionError, SourceUser
31
32
33 def make_blueprint(config: SDConfig) -> Blueprint:
34 view = Blueprint('main', __name__)
35
36 @view.route('/')
37 def index() -> str:
38 return render_template('index.html')
39
40 @view.route('/generate', methods=('GET',))
41 def generate() -> Union[str, werkzeug.Response]:
42 if SessionManager.is_user_logged_in(db_session=db.session):
43 flash(gettext(
44 "You were redirected because you are already logged in. "
45 "If you want to create a new account, you should log out "
46 "first."),
47 "notification")
48 return redirect(url_for('.lookup'))
49
50 codename = PassphraseGenerator.get_default().generate_passphrase(
51 preferred_language=g.localeinfo.language
52 )
53
54 # Generate a unique id for each browser tab and associate the codename with this id.
55 # This will allow retrieval of the codename displayed in the tab from which the source has
56 # clicked to proceed to /generate (ref. issue #4458)
57 tab_id = urlsafe_b64encode(os.urandom(64)).decode()
58 codenames = session.get('codenames', {})
59 codenames[tab_id] = codename
60 session['codenames'] = fit_codenames_into_cookie(codenames)
61 session["codenames_expire"] = datetime.now(timezone.utc) + timedelta(
62 minutes=config.SESSION_EXPIRATION_MINUTES
63 )
64 return render_template('generate.html', codename=codename, tab_id=tab_id)
65
66 @view.route('/create', methods=['POST'])
67 def create() -> werkzeug.Response:
68 if SessionManager.is_user_logged_in(db_session=db.session):
69 flash(gettext("You are already logged in. Please verify your codename above as it " +
70 "may differ from the one displayed on the previous page."),
71 'notification')
72 else:
73 # Ensure the codenames have not expired
74 date_codenames_expire = session.get("codenames_expire")
75 if not date_codenames_expire or datetime.now(timezone.utc) >= date_codenames_expire:
76 return clear_session_and_redirect_to_logged_out_page(flask_session=session)
77
78 tab_id = request.form['tab_id']
79 codename = session['codenames'][tab_id]
80 del session['codenames']
81
82 try:
83 current_app.logger.info("Creating new source user...")
84 create_source_user(
85 db_session=db.session,
86 source_passphrase=codename,
87 source_app_storage=Storage.get_default(),
88 )
89 except (SourcePassphraseCollisionError, SourceDesignationCollisionError) as e:
90 current_app.logger.error("Could not create a source: {}".format(e))
91 flash(
92 gettext(
93 "There was a temporary problem creating your account. Please try again."
94 ),
95 "error"
96 )
97 return redirect(url_for('.index'))
98
99 # All done - source user was successfully created
100 current_app.logger.info("New source user created")
101 session['new_user_codename'] = codename
102 SessionManager.log_user_in(db_session=db.session,
103 supplied_passphrase=DicewarePassphrase(codename))
104
105 return redirect(url_for('.lookup'))
106
107 @view.route('/lookup', methods=('GET',))
108 @login_required
109 def lookup(logged_in_source: SourceUser) -> str:
110 replies = []
111 logged_in_source_in_db = logged_in_source.get_db_record()
112 source_inbox = Reply.query.filter_by(
113 source_id=logged_in_source_in_db.id, deleted_by_source=False
114 ).all()
115
116 for reply in source_inbox:
117 reply_path = Storage.get_default().path(
118 logged_in_source.filesystem_id,
119 reply.filename,
120 )
121 try:
122 with io.open(reply_path, "rb") as f:
123 contents = f.read()
124 decrypted_reply = EncryptionManager.get_default().decrypt_journalist_reply(
125 for_source_user=logged_in_source,
126 ciphertext_in=contents
127 )
128 reply.decrypted = decrypted_reply
129 except UnicodeDecodeError:
130 current_app.logger.error("Could not decode reply %s" %
131 reply.filename)
132 except FileNotFoundError:
133 current_app.logger.error("Reply file missing: %s" %
134 reply.filename)
135 else:
136 reply.date = datetime.utcfromtimestamp(
137 os.stat(reply_path).st_mtime)
138 replies.append(reply)
139
140 # Sort the replies by date
141 replies.sort(key=operator.attrgetter('date'), reverse=True)
142
143 # If not done yet, generate a keypair to encrypt replies from the journalist
144 encryption_mgr = EncryptionManager.get_default()
145 try:
146 encryption_mgr.get_source_public_key(logged_in_source.filesystem_id)
147 except GpgKeyNotFoundError:
148 encryption_mgr.generate_source_key_pair(logged_in_source)
149
150 return render_template(
151 'lookup.html',
152 is_user_logged_in=True,
153 allow_document_uploads=InstanceConfig.get_default().allow_document_uploads,
154 replies=replies,
155 new_user_codename=session.get('new_user_codename', None),
156 form=SubmissionForm(),
157 )
158
159 @view.route('/submit', methods=('POST',))
160 @login_required
161 def submit(logged_in_source: SourceUser) -> werkzeug.Response:
162 allow_document_uploads = InstanceConfig.get_default().allow_document_uploads
163 form = SubmissionForm()
164 if not form.validate():
165 for field, errors in form.errors.items():
166 for error in errors:
167 flash(error, "error")
168 return redirect(f"{url_for('main.lookup')}#flashed")
169
170 msg = request.form['msg']
171 fh = None
172 if allow_document_uploads and 'fh' in request.files:
173 fh = request.files['fh']
174
175 # Don't submit anything if it was an "empty" submission. #878
176 if not (msg or fh):
177 if allow_document_uploads:
178 flash(gettext(
179 "You must enter a message or choose a file to submit."),
180 "error")
181 else:
182 flash(gettext("You must enter a message."), "error")
183 return redirect(f"{url_for('main.lookup')}#flashed")
184
185 fnames = []
186 logged_in_source_in_db = logged_in_source.get_db_record()
187 first_submission = logged_in_source_in_db.interaction_count == 0
188
189 if not os.path.exists(Storage.get_default().path(logged_in_source.filesystem_id)):
190 current_app.logger.debug("Store directory not found for source '{}', creating one."
191 .format(logged_in_source_in_db.journalist_designation))
192 os.mkdir(Storage.get_default().path(logged_in_source.filesystem_id))
193
194 if msg:
195 logged_in_source_in_db.interaction_count += 1
196 fnames.append(
197 Storage.get_default().save_message_submission(
198 logged_in_source_in_db.filesystem_id,
199 logged_in_source_in_db.interaction_count,
200 logged_in_source_in_db.journalist_filename,
201 msg))
202 if fh:
203 logged_in_source_in_db.interaction_count += 1
204 fnames.append(
205 Storage.get_default().save_file_submission(
206 logged_in_source_in_db.filesystem_id,
207 logged_in_source_in_db.interaction_count,
208 logged_in_source_in_db.journalist_filename,
209 fh.filename,
210 fh.stream))
211
212 if first_submission:
213 flash_message = render_template(
214 'first_submission_flashed_message.html',
215 new_user_codename=session.get('new_user_codename', None),
216 )
217 flash(Markup(flash_message), "success")
218
219 else:
220 if msg and not fh:
221 html_contents = gettext('Thanks! We received your message.')
222 elif fh and not msg:
223 html_contents = gettext('Thanks! We received your document.')
224 else:
225 html_contents = gettext('Thanks! We received your message and '
226 'document.')
227
228 flash_message = render_template(
229 'next_submission_flashed_message.html',
230 html_contents=html_contents
231 )
232 flash(Markup(flash_message), "success")
233
234 new_submissions = []
235 for fname in fnames:
236 submission = Submission(logged_in_source_in_db, fname, Storage.get_default())
237 db.session.add(submission)
238 new_submissions.append(submission)
239
240 logged_in_source_in_db.pending = False
241 logged_in_source_in_db.last_updated = datetime.now(timezone.utc)
242 db.session.commit()
243
244 for sub in new_submissions:
245 store.async_add_checksum_for_file(sub, Storage.get_default())
246
247 normalize_timestamps(logged_in_source)
248
249 return redirect(f"{url_for('main.lookup')}#flashed")
250
251 @view.route('/delete', methods=('POST',))
252 @login_required
253 def delete(logged_in_source: SourceUser) -> werkzeug.Response:
254 """This deletes the reply from the source's inbox, but preserves
255 the history for journalists such that they can view conversation
256 history.
257 """
258
259 query = Reply.query.filter_by(
260 filename=request.form['reply_filename'],
261 source_id=logged_in_source.db_record_id)
262 reply = get_one_or_else(query, current_app.logger, abort)
263 reply.deleted_by_source = True
264 db.session.add(reply)
265 db.session.commit()
266
267 flash(gettext("Reply deleted"), "notification")
268 return redirect(url_for('.lookup'))
269
270 @view.route('/delete-all', methods=('POST',))
271 @login_required
272 def batch_delete(logged_in_source: SourceUser) -> werkzeug.Response:
273 replies = Reply.query.filter(Reply.source_id == logged_in_source.db_record_id) \
274 .filter(Reply.deleted_by_source == False).all() # noqa
275 if len(replies) == 0:
276 current_app.logger.error("Found no replies when at least one was "
277 "expected")
278 return redirect(url_for('.lookup'))
279
280 for reply in replies:
281 reply.deleted_by_source = True
282 db.session.add(reply)
283 db.session.commit()
284
285 flash(gettext("All replies have been deleted"), "notification")
286 return redirect(url_for('.lookup'))
287
288 @view.route('/login', methods=('GET', 'POST'))
289 def login() -> Union[str, werkzeug.Response]:
290 form = LoginForm()
291 if form.validate_on_submit():
292 try:
293 SessionManager.log_user_in(
294 db_session=db.session,
295 supplied_passphrase=DicewarePassphrase(request.form['codename'].strip())
296 )
297 except InvalidPassphraseError:
298 current_app.logger.info("Login failed for invalid codename")
299 flash(gettext("Sorry, that is not a recognized codename."), "error")
300 else:
301 # Success: a valid passphrase was supplied
302 return redirect(url_for('.lookup', from_login='1'))
303
304 return render_template('login.html', form=form)
305
306 @view.route('/logout')
307 def logout() -> Union[str, werkzeug.Response]:
308 """
309 If a user is logged in, show them a logout page that prompts them to
310 click the New Identity button in Tor Browser to complete their session.
311 Otherwise redirect to the main Source Interface page.
312 """
313 if SessionManager.is_user_logged_in(db_session=db.session):
314 SessionManager.log_user_out()
315
316 # Clear the session after we render the message so it's localized
317 # If a user specified a locale, save it and restore it
318 session.clear()
319 session['locale'] = g.localeinfo.id
320
321 return render_template('logout.html')
322 else:
323 return redirect(url_for('.index'))
324
325 @view.route('/robots.txt')
326 def robots_txt() -> werkzeug.Response:
327 """Tell robots we don't want them"""
328 resp = make_response("User-agent: *\nDisallow: /")
329 resp.headers["content-type"] = "text/plain"
330 return resp
331
332 return view
333
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -37,8 +37,15 @@
def index() -> str:
return render_template('index.html')
- @view.route('/generate', methods=('GET',))
+ @view.route('/generate', methods=('POST',))
def generate() -> Union[str, werkzeug.Response]:
+ # Try to detect Tor2Web usage by looking to see if tor2web_check got mangled
+ tor2web_check = request.form.get('tor2web_check')
+ if tor2web_check is None:
+ # Missing form field
+ abort(403)
+ elif tor2web_check != 'href="fake.onion"':
+ return redirect(url_for('info.tor2web_warning'))
if SessionManager.is_user_logged_in(db_session=db.session):
flash(gettext(
"You were redirected because you are already logged in. "
@@ -46,7 +53,6 @@
"first."),
"notification")
return redirect(url_for('.lookup'))
-
codename = PassphraseGenerator.get_default().generate_passphrase(
preferred_language=g.localeinfo.language
)
|
{"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -37,8 +37,15 @@\n def index() -> str:\n return render_template('index.html')\n \n- @view.route('/generate', methods=('GET',))\n+ @view.route('/generate', methods=('POST',))\n def generate() -> Union[str, werkzeug.Response]:\n+ # Try to detect Tor2Web usage by looking to see if tor2web_check got mangled\n+ tor2web_check = request.form.get('tor2web_check')\n+ if tor2web_check is None:\n+ # Missing form field\n+ abort(403)\n+ elif tor2web_check != 'href=\"fake.onion\"':\n+ return redirect(url_for('info.tor2web_warning'))\n if SessionManager.is_user_logged_in(db_session=db.session):\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n@@ -46,7 +53,6 @@\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n-\n codename = PassphraseGenerator.get_default().generate_passphrase(\n preferred_language=g.localeinfo.language\n )\n", "issue": "investigate and implement improvements to header-based tor2web detection for current active tor2web proxies\n\n", "before_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Union\n\nimport werkzeug\nfrom flask import (Blueprint, render_template, flash, redirect, url_for,\n session, current_app, request, Markup, abort, g, make_response)\nfrom flask_babel import gettext\n\nimport store\n\nfrom store import Storage\n\nfrom db import db\nfrom encryption import EncryptionManager, GpgKeyNotFoundError\n\nfrom models import Submission, Reply, get_one_or_else, InstanceConfig\nfrom passphrases import PassphraseGenerator, DicewarePassphrase\nfrom sdconfig import SDConfig\nfrom source_app.decorators import login_required\nfrom source_app.session_manager import SessionManager\nfrom source_app.utils import normalize_timestamps, fit_codenames_into_cookie, \\\n clear_session_and_redirect_to_logged_out_page\nfrom source_app.forms import LoginForm, SubmissionForm\nfrom source_user import InvalidPassphraseError, create_source_user, \\\n SourcePassphraseCollisionError, SourceDesignationCollisionError, SourceUser\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index() -> str:\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET',))\n def generate() -> Union[str, werkzeug.Response]:\n if SessionManager.is_user_logged_in(db_session=db.session):\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = PassphraseGenerator.get_default().generate_passphrase(\n preferred_language=g.localeinfo.language\n )\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. 
issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = fit_codenames_into_cookie(codenames)\n session[\"codenames_expire\"] = datetime.now(timezone.utc) + timedelta(\n minutes=config.SESSION_EXPIRATION_MINUTES\n )\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/create', methods=['POST'])\n def create() -> werkzeug.Response:\n if SessionManager.is_user_logged_in(db_session=db.session):\n flash(gettext(\"You are already logged in. Please verify your codename above as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n # Ensure the codenames have not expired\n date_codenames_expire = session.get(\"codenames_expire\")\n if not date_codenames_expire or datetime.now(timezone.utc) >= date_codenames_expire:\n return clear_session_and_redirect_to_logged_out_page(flask_session=session)\n\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n del session['codenames']\n\n try:\n current_app.logger.info(\"Creating new source user...\")\n create_source_user(\n db_session=db.session,\n source_passphrase=codename,\n source_app_storage=Storage.get_default(),\n )\n except (SourcePassphraseCollisionError, SourceDesignationCollisionError) as e:\n current_app.logger.error(\"Could not create a source: {}\".format(e))\n flash(\n gettext(\n \"There was a temporary problem creating your account. Please try again.\"\n ),\n \"error\"\n )\n return redirect(url_for('.index'))\n\n # All done - source user was successfully created\n current_app.logger.info(\"New source user created\")\n session['new_user_codename'] = codename\n SessionManager.log_user_in(db_session=db.session,\n supplied_passphrase=DicewarePassphrase(codename))\n\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup(logged_in_source: SourceUser) -> str:\n replies = []\n logged_in_source_in_db = logged_in_source.get_db_record()\n source_inbox = Reply.query.filter_by(\n source_id=logged_in_source_in_db.id, deleted_by_source=False\n ).all()\n\n for reply in source_inbox:\n reply_path = Storage.get_default().path(\n logged_in_source.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n decrypted_reply = EncryptionManager.get_default().decrypt_journalist_reply(\n for_source_user=logged_in_source,\n ciphertext_in=contents\n )\n reply.decrypted = decrypted_reply\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n except FileNotFoundError:\n current_app.logger.error(\"Reply file missing: %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # If not done yet, generate a keypair to encrypt replies from the journalist\n encryption_mgr = EncryptionManager.get_default()\n try:\n encryption_mgr.get_source_public_key(logged_in_source.filesystem_id)\n except GpgKeyNotFoundError:\n encryption_mgr.generate_source_key_pair(logged_in_source)\n\n return render_template(\n 'lookup.html',\n is_user_logged_in=True,\n allow_document_uploads=InstanceConfig.get_default().allow_document_uploads,\n replies=replies,\n new_user_codename=session.get('new_user_codename', None),\n form=SubmissionForm(),\n )\n\n 
@view.route('/submit', methods=('POST',))\n @login_required\n def submit(logged_in_source: SourceUser) -> werkzeug.Response:\n allow_document_uploads = InstanceConfig.get_default().allow_document_uploads\n form = SubmissionForm()\n if not form.validate():\n for field, errors in form.errors.items():\n for error in errors:\n flash(error, \"error\")\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n fnames = []\n logged_in_source_in_db = logged_in_source.get_db_record()\n first_submission = logged_in_source_in_db.interaction_count == 0\n\n if not os.path.exists(Storage.get_default().path(logged_in_source.filesystem_id)):\n current_app.logger.debug(\"Store directory not found for source '{}', creating one.\"\n .format(logged_in_source_in_db.journalist_designation))\n os.mkdir(Storage.get_default().path(logged_in_source.filesystem_id))\n\n if msg:\n logged_in_source_in_db.interaction_count += 1\n fnames.append(\n Storage.get_default().save_message_submission(\n logged_in_source_in_db.filesystem_id,\n logged_in_source_in_db.interaction_count,\n logged_in_source_in_db.journalist_filename,\n msg))\n if fh:\n logged_in_source_in_db.interaction_count += 1\n fnames.append(\n Storage.get_default().save_file_submission(\n logged_in_source_in_db.filesystem_id,\n logged_in_source_in_db.interaction_count,\n logged_in_source_in_db.journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n flash_message = render_template(\n 'first_submission_flashed_message.html',\n new_user_codename=session.get('new_user_codename', None),\n )\n flash(Markup(flash_message), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif fh and not msg:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! 
We received your message and '\n 'document.')\n\n flash_message = render_template(\n 'next_submission_flashed_message.html',\n html_contents=html_contents\n )\n flash(Markup(flash_message), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(logged_in_source_in_db, fname, Storage.get_default())\n db.session.add(submission)\n new_submissions.append(submission)\n\n logged_in_source_in_db.pending = False\n logged_in_source_in_db.last_updated = datetime.now(timezone.utc)\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub, Storage.get_default())\n\n normalize_timestamps(logged_in_source)\n\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete(logged_in_source: SourceUser) -> werkzeug.Response:\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=logged_in_source.db_record_id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete(logged_in_source: SourceUser) -> werkzeug.Response:\n replies = Reply.query.filter(Reply.source_id == logged_in_source.db_record_id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login() -> Union[str, werkzeug.Response]:\n form = LoginForm()\n if form.validate_on_submit():\n try:\n SessionManager.log_user_in(\n db_session=db.session,\n supplied_passphrase=DicewarePassphrase(request.form['codename'].strip())\n )\n except InvalidPassphraseError:\n current_app.logger.info(\"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"), \"error\")\n else:\n # Success: a valid passphrase was supplied\n return redirect(url_for('.lookup', from_login='1'))\n\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout() -> Union[str, werkzeug.Response]:\n \"\"\"\n If a user is logged in, show them a logout page that prompts them to\n click the New Identity button in Tor Browser to complete their session.\n Otherwise redirect to the main Source Interface page.\n \"\"\"\n if SessionManager.is_user_logged_in(db_session=db.session):\n SessionManager.log_user_out()\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n session.clear()\n session['locale'] = g.localeinfo.id\n\n return render_template('logout.html')\n else:\n return redirect(url_for('.index'))\n\n @view.route('/robots.txt')\n def robots_txt() -> werkzeug.Response:\n \"\"\"Tell robots we don't want them\"\"\"\n resp = make_response(\"User-agent: *\\nDisallow: /\")\n resp.headers[\"content-type\"] = \"text/plain\"\n return resp\n\n return view\n", 
"path": "securedrop/source_app/main.py"}], "after_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Union\n\nimport werkzeug\nfrom flask import (Blueprint, render_template, flash, redirect, url_for,\n session, current_app, request, Markup, abort, g)\nfrom flask_babel import gettext\n\nimport store\n\nfrom store import Storage\n\nfrom db import db\nfrom encryption import EncryptionManager, GpgKeyNotFoundError\n\nfrom models import Submission, Reply, get_one_or_else, InstanceConfig\nfrom passphrases import PassphraseGenerator, DicewarePassphrase\nfrom sdconfig import SDConfig\nfrom source_app.decorators import login_required\nfrom source_app.session_manager import SessionManager\nfrom source_app.utils import normalize_timestamps, fit_codenames_into_cookie, \\\n clear_session_and_redirect_to_logged_out_page\nfrom source_app.forms import LoginForm, SubmissionForm\nfrom source_user import InvalidPassphraseError, create_source_user, \\\n SourcePassphraseCollisionError, SourceDesignationCollisionError, SourceUser\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index() -> str:\n return render_template('index.html')\n\n @view.route('/generate', methods=('POST',))\n def generate() -> Union[str, werkzeug.Response]:\n # Try to detect Tor2Web usage by looking to see if tor2web_check got mangled\n tor2web_check = request.form.get('tor2web_check')\n if tor2web_check is None:\n # Missing form field\n abort(403)\n elif tor2web_check != 'href=\"fake.onion\"':\n return redirect(url_for('info.tor2web_warning'))\n if SessionManager.is_user_logged_in(db_session=db.session):\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n codename = PassphraseGenerator.get_default().generate_passphrase(\n preferred_language=g.localeinfo.language\n )\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = fit_codenames_into_cookie(codenames)\n session[\"codenames_expire\"] = datetime.now(timezone.utc) + timedelta(\n minutes=config.SESSION_EXPIRATION_MINUTES\n )\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/create', methods=['POST'])\n def create() -> werkzeug.Response:\n if SessionManager.is_user_logged_in(db_session=db.session):\n flash(gettext(\"You are already logged in. 
Please verify your codename above as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n # Ensure the codenames have not expired\n date_codenames_expire = session.get(\"codenames_expire\")\n if not date_codenames_expire or datetime.now(timezone.utc) >= date_codenames_expire:\n return clear_session_and_redirect_to_logged_out_page(flask_session=session)\n\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n del session['codenames']\n\n try:\n current_app.logger.info(\"Creating new source user...\")\n create_source_user(\n db_session=db.session,\n source_passphrase=codename,\n source_app_storage=Storage.get_default(),\n )\n except (SourcePassphraseCollisionError, SourceDesignationCollisionError) as e:\n current_app.logger.error(\"Could not create a source: {}\".format(e))\n flash(\n gettext(\n \"There was a temporary problem creating your account. Please try again.\"\n ),\n \"error\"\n )\n return redirect(url_for('.index'))\n\n # All done - source user was successfully created\n current_app.logger.info(\"New source user created\")\n session['new_user_codename'] = codename\n SessionManager.log_user_in(db_session=db.session,\n supplied_passphrase=DicewarePassphrase(codename))\n\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup(logged_in_source: SourceUser) -> str:\n replies = []\n logged_in_source_in_db = logged_in_source.get_db_record()\n source_inbox = Reply.query.filter_by(\n source_id=logged_in_source_in_db.id, deleted_by_source=False\n ).all()\n\n for reply in source_inbox:\n reply_path = Storage.get_default().path(\n logged_in_source.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n decrypted_reply = EncryptionManager.get_default().decrypt_journalist_reply(\n for_source_user=logged_in_source,\n ciphertext_in=contents\n )\n reply.decrypted = decrypted_reply\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n except FileNotFoundError:\n current_app.logger.error(\"Reply file missing: %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # If not done yet, generate a keypair to encrypt replies from the journalist\n encryption_mgr = EncryptionManager.get_default()\n try:\n encryption_mgr.get_source_public_key(logged_in_source.filesystem_id)\n except GpgKeyNotFoundError:\n encryption_mgr.generate_source_key_pair(logged_in_source)\n\n return render_template(\n 'lookup.html',\n is_user_logged_in=True,\n allow_document_uploads=InstanceConfig.get_default().allow_document_uploads,\n replies=replies,\n new_user_codename=session.get('new_user_codename', None),\n form=SubmissionForm(),\n )\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit(logged_in_source: SourceUser) -> werkzeug.Response:\n allow_document_uploads = InstanceConfig.get_default().allow_document_uploads\n form = SubmissionForm()\n if not form.validate():\n for field, errors in form.errors.items():\n for error in errors:\n flash(error, \"error\")\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. 
#878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n fnames = []\n logged_in_source_in_db = logged_in_source.get_db_record()\n first_submission = logged_in_source_in_db.interaction_count == 0\n\n if not os.path.exists(Storage.get_default().path(logged_in_source.filesystem_id)):\n current_app.logger.debug(\"Store directory not found for source '{}', creating one.\"\n .format(logged_in_source_in_db.journalist_designation))\n os.mkdir(Storage.get_default().path(logged_in_source.filesystem_id))\n\n if msg:\n logged_in_source_in_db.interaction_count += 1\n fnames.append(\n Storage.get_default().save_message_submission(\n logged_in_source_in_db.filesystem_id,\n logged_in_source_in_db.interaction_count,\n logged_in_source_in_db.journalist_filename,\n msg))\n if fh:\n logged_in_source_in_db.interaction_count += 1\n fnames.append(\n Storage.get_default().save_file_submission(\n logged_in_source_in_db.filesystem_id,\n logged_in_source_in_db.interaction_count,\n logged_in_source_in_db.journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n flash_message = render_template(\n 'first_submission_flashed_message.html',\n new_user_codename=session.get('new_user_codename', None),\n )\n flash(Markup(flash_message), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif fh and not msg:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n flash_message = render_template(\n 'next_submission_flashed_message.html',\n html_contents=html_contents\n )\n flash(Markup(flash_message), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(logged_in_source_in_db, fname, Storage.get_default())\n db.session.add(submission)\n new_submissions.append(submission)\n\n logged_in_source_in_db.pending = False\n logged_in_source_in_db.last_updated = datetime.now(timezone.utc)\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub, Storage.get_default())\n\n normalize_timestamps(logged_in_source)\n\n return redirect(f\"{url_for('main.lookup')}#flashed\")\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete(logged_in_source: SourceUser) -> werkzeug.Response:\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=logged_in_source.db_record_id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete(logged_in_source: SourceUser) -> werkzeug.Response:\n replies = Reply.query.filter(Reply.source_id == logged_in_source.db_record_id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n 
db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login() -> Union[str, werkzeug.Response]:\n form = LoginForm()\n if form.validate_on_submit():\n try:\n SessionManager.log_user_in(\n db_session=db.session,\n supplied_passphrase=DicewarePassphrase(request.form['codename'].strip())\n )\n except InvalidPassphraseError:\n current_app.logger.info(\"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"), \"error\")\n else:\n # Success: a valid passphrase was supplied\n return redirect(url_for('.lookup', from_login='1'))\n\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout() -> Union[str, werkzeug.Response]:\n \"\"\"\n If a user is logged in, show them a logout page that prompts them to\n click the New Identity button in Tor Browser to complete their session.\n Otherwise redirect to the main Source Interface page.\n \"\"\"\n if SessionManager.is_user_logged_in(db_session=db.session):\n SessionManager.log_user_out()\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n session.clear()\n session['locale'] = g.localeinfo.id\n\n return render_template('logout.html')\n else:\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]}
| 3,885 | 285 |
gh_patches_debug_36550 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-2816 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support /projects/:id/job_token_scope/allowlist
I would like to access the job token allow list through the Gitlab API.
Docs:
- [Get a project’s CI/CD job token inbound allowlist](https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist)
- [Allow access to your project with a job token](https://docs.gitlab.com/ee/ci/jobs/ci_job_token.html#allow-access-to-your-project-with-a-job-token)
API endpoint:
```
GET/POST/DELETE /projects/:id/job_token_scope/allowlist
```
I currently have the following manager to access the job token scope allow list:
```
class ProjectJobTokenScopeAllowlistManager(
gitlab.mixins.ListMixin,
gitlab.mixins.CreateMixin,
gitlab.mixins.DeleteMixin,
gitlab.base.RESTManager,
):
_path = "/projects/{project_id}/job_token_scope/allowlist"
_obj_cls = gitlab.v4.objects.Project
_from_parent_attrs = {"project_id": "id"}
_create_attrs = gitlab.types.RequiredOptional(required=("target_project_id",))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gitlab/v4/objects/job_token_scope.py`
Content:
```
1 from typing import Any, cast
2
3 from gitlab.base import RESTManager, RESTObject
4 from gitlab.mixins import (
5 GetWithoutIdMixin,
6 RefreshMixin,
7 SaveMixin,
8 UpdateMethod,
9 UpdateMixin,
10 )
11
12 __all__ = [
13 "ProjectJobTokenScope",
14 "ProjectJobTokenScopeManager",
15 ]
16
17
18 class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):
19 _id_attr = None
20
21
22 class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):
23 _path = "/projects/{project_id}/job_token_scope"
24 _obj_cls = ProjectJobTokenScope
25 _from_parent_attrs = {"project_id": "id"}
26 _update_method = UpdateMethod.PATCH
27
28 def get(self, **kwargs: Any) -> ProjectJobTokenScope:
29 return cast(ProjectJobTokenScope, super().get(**kwargs))
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gitlab/v4/objects/job_token_scope.py b/gitlab/v4/objects/job_token_scope.py
--- a/gitlab/v4/objects/job_token_scope.py
+++ b/gitlab/v4/objects/job_token_scope.py
@@ -2,12 +2,17 @@
from gitlab.base import RESTManager, RESTObject
from gitlab.mixins import (
+ CreateMixin,
+ DeleteMixin,
GetWithoutIdMixin,
+ ListMixin,
+ ObjectDeleteMixin,
RefreshMixin,
SaveMixin,
UpdateMethod,
UpdateMixin,
)
+from gitlab.types import RequiredOptional
__all__ = [
"ProjectJobTokenScope",
@@ -18,6 +23,9 @@
class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):
_id_attr = None
+ allowlist: "AllowlistProjectManager"
+ groups_allowlist: "AllowlistGroupManager"
+
class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):
_path = "/projects/{project_id}/job_token_scope"
@@ -27,3 +35,43 @@
def get(self, **kwargs: Any) -> ProjectJobTokenScope:
return cast(ProjectJobTokenScope, super().get(**kwargs))
+
+
+class AllowlistProject(ObjectDeleteMixin, RESTObject):
+ _id_attr = "target_project_id" # note: only true for create endpoint
+
+ def get_id(self) -> int:
+ """Returns the id of the resource. This override deals with
+ the fact that either an `id` or a `target_project_id` attribute
+ is returned by the server depending on the endpoint called."""
+ target_project_id = cast(int, super().get_id())
+ if target_project_id is not None:
+ return target_project_id
+ return cast(int, self.id)
+
+
+class AllowlistProjectManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
+ _path = "/projects/{project_id}/job_token_scope/allowlist"
+ _obj_cls = AllowlistProject
+ _from_parent_attrs = {"project_id": "project_id"}
+ _create_attrs = RequiredOptional(required=("target_project_id",))
+
+
+class AllowlistGroup(ObjectDeleteMixin, RESTObject):
+ _id_attr = "target_group_id" # note: only true for create endpoint
+
+ def get_id(self) -> int:
+ """Returns the id of the resource. This override deals with
+ the fact that either an `id` or a `target_group_id` attribute
+ is returned by the server depending on the endpoint called."""
+ target_group_id = cast(int, super().get_id())
+ if target_group_id is not None:
+ return target_group_id
+ return cast(int, self.id)
+
+
+class AllowlistGroupManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):
+ _path = "/projects/{project_id}/job_token_scope/groups_allowlist"
+ _obj_cls = AllowlistGroup
+ _from_parent_attrs = {"project_id": "project_id"}
+ _create_attrs = RequiredOptional(required=("target_group_id",))
|
{"golden_diff": "diff --git a/gitlab/v4/objects/job_token_scope.py b/gitlab/v4/objects/job_token_scope.py\n--- a/gitlab/v4/objects/job_token_scope.py\n+++ b/gitlab/v4/objects/job_token_scope.py\n@@ -2,12 +2,17 @@\n \n from gitlab.base import RESTManager, RESTObject\n from gitlab.mixins import (\n+ CreateMixin,\n+ DeleteMixin,\n GetWithoutIdMixin,\n+ ListMixin,\n+ ObjectDeleteMixin,\n RefreshMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n )\n+from gitlab.types import RequiredOptional\n \n __all__ = [\n \"ProjectJobTokenScope\",\n@@ -18,6 +23,9 @@\n class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):\n _id_attr = None\n \n+ allowlist: \"AllowlistProjectManager\"\n+ groups_allowlist: \"AllowlistGroupManager\"\n+\n \n class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope\"\n@@ -27,3 +35,43 @@\n \n def get(self, **kwargs: Any) -> ProjectJobTokenScope:\n return cast(ProjectJobTokenScope, super().get(**kwargs))\n+\n+\n+class AllowlistProject(ObjectDeleteMixin, RESTObject):\n+ _id_attr = \"target_project_id\" # note: only true for create endpoint\n+\n+ def get_id(self) -> int:\n+ \"\"\"Returns the id of the resource. This override deals with\n+ the fact that either an `id` or a `target_project_id` attribute\n+ is returned by the server depending on the endpoint called.\"\"\"\n+ target_project_id = cast(int, super().get_id())\n+ if target_project_id is not None:\n+ return target_project_id\n+ return cast(int, self.id)\n+\n+\n+class AllowlistProjectManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n+ _path = \"/projects/{project_id}/job_token_scope/allowlist\"\n+ _obj_cls = AllowlistProject\n+ _from_parent_attrs = {\"project_id\": \"project_id\"}\n+ _create_attrs = RequiredOptional(required=(\"target_project_id\",))\n+\n+\n+class AllowlistGroup(ObjectDeleteMixin, RESTObject):\n+ _id_attr = \"target_group_id\" # note: only true for create endpoint\n+\n+ def get_id(self) -> int:\n+ \"\"\"Returns the id of the resource. This override deals with\n+ the fact that either an `id` or a `target_group_id` attribute\n+ is returned by the server depending on the endpoint called.\"\"\"\n+ target_group_id = cast(int, super().get_id())\n+ if target_group_id is not None:\n+ return target_group_id\n+ return cast(int, self.id)\n+\n+\n+class AllowlistGroupManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n+ _path = \"/projects/{project_id}/job_token_scope/groups_allowlist\"\n+ _obj_cls = AllowlistGroup\n+ _from_parent_attrs = {\"project_id\": \"project_id\"}\n+ _create_attrs = RequiredOptional(required=(\"target_group_id\",))\n", "issue": "Support /projects/:id/job_token_scope/allowlist\nI would like to access the job token allow list through the Gitlab API. 
\r\n\r\nDocs: \r\n- [Get a project\u2019s CI/CD job token inbound allowlist](https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist)\r\n- [Allow access to your project with a job token](https://docs.gitlab.com/ee/ci/jobs/ci_job_token.html#allow-access-to-your-project-with-a-job-token)\r\n\r\nAPI endpoint: \r\n```\r\nGET/POST/DELETE /projects/:id/job_token_scope/allowlist\r\n```\r\n\r\nI currently have the following manager to access the job token scope allow list:\r\n\r\n```\r\nclass ProjectJobTokenScopeAllowlistManager(\r\n gitlab.mixins.ListMixin,\r\n gitlab.mixins.CreateMixin,\r\n gitlab.mixins.DeleteMixin,\r\n gitlab.base.RESTManager,\r\n):\r\n _path = \"/projects/{project_id}/job_token_scope/allowlist\"\r\n _obj_cls = gitlab.v4.objects.Project\r\n _from_parent_attrs = {\"project_id\": \"id\"}\r\n _create_attrs = gitlab.types.RequiredOptional(required=(\"target_project_id\",))\r\n```\n", "before_files": [{"content": "from typing import Any, cast\n\nfrom gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import (\n GetWithoutIdMixin,\n RefreshMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n)\n\n__all__ = [\n \"ProjectJobTokenScope\",\n \"ProjectJobTokenScopeManager\",\n]\n\n\nclass ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):\n _id_attr = None\n\n\nclass ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope\"\n _obj_cls = ProjectJobTokenScope\n _from_parent_attrs = {\"project_id\": \"id\"}\n _update_method = UpdateMethod.PATCH\n\n def get(self, **kwargs: Any) -> ProjectJobTokenScope:\n return cast(ProjectJobTokenScope, super().get(**kwargs))\n", "path": "gitlab/v4/objects/job_token_scope.py"}], "after_files": [{"content": "from typing import Any, cast\n\nfrom gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import (\n CreateMixin,\n DeleteMixin,\n GetWithoutIdMixin,\n ListMixin,\n ObjectDeleteMixin,\n RefreshMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n)\nfrom gitlab.types import RequiredOptional\n\n__all__ = [\n \"ProjectJobTokenScope\",\n \"ProjectJobTokenScopeManager\",\n]\n\n\nclass ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):\n _id_attr = None\n\n allowlist: \"AllowlistProjectManager\"\n groups_allowlist: \"AllowlistGroupManager\"\n\n\nclass ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope\"\n _obj_cls = ProjectJobTokenScope\n _from_parent_attrs = {\"project_id\": \"id\"}\n _update_method = UpdateMethod.PATCH\n\n def get(self, **kwargs: Any) -> ProjectJobTokenScope:\n return cast(ProjectJobTokenScope, super().get(**kwargs))\n\n\nclass AllowlistProject(ObjectDeleteMixin, RESTObject):\n _id_attr = \"target_project_id\" # note: only true for create endpoint\n\n def get_id(self) -> int:\n \"\"\"Returns the id of the resource. 
This override deals with\n the fact that either an `id` or a `target_project_id` attribute\n is returned by the server depending on the endpoint called.\"\"\"\n target_project_id = cast(int, super().get_id())\n if target_project_id is not None:\n return target_project_id\n return cast(int, self.id)\n\n\nclass AllowlistProjectManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope/allowlist\"\n _obj_cls = AllowlistProject\n _from_parent_attrs = {\"project_id\": \"project_id\"}\n _create_attrs = RequiredOptional(required=(\"target_project_id\",))\n\n\nclass AllowlistGroup(ObjectDeleteMixin, RESTObject):\n _id_attr = \"target_group_id\" # note: only true for create endpoint\n\n def get_id(self) -> int:\n \"\"\"Returns the id of the resource. This override deals with\n the fact that either an `id` or a `target_group_id` attribute\n is returned by the server depending on the endpoint called.\"\"\"\n target_group_id = cast(int, super().get_id())\n if target_group_id is not None:\n return target_group_id\n return cast(int, self.id)\n\n\nclass AllowlistGroupManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope/groups_allowlist\"\n _obj_cls = AllowlistGroup\n _from_parent_attrs = {\"project_id\": \"project_id\"}\n _create_attrs = RequiredOptional(required=(\"target_group_id\",))\n", "path": "gitlab/v4/objects/job_token_scope.py"}]}
| 769 | 704 |
gh_patches_debug_6178 | rasdani/github-patches | git_diff | Nitrate__Nitrate-440 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Django 2.2.x
Add a testenv in tox.ini for Django 2.2.
Add "Framework :: Django :: 2.2" to ``setup.py``.
Update README to add version `2.2`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'beautifulsoup4 >= 4.1.1',
17 'django >= 2.0,<3.0',
18 'django-contrib-comments == 1.8.0',
19 'django-tinymce == 2.7.0',
20 'django-uuslug == 1.1.8',
21 'html2text',
22 'odfpy >= 0.9.6',
23 'python-bugzilla',
24 'xmltodict',
25 'kobo == 0.9.0'
26 ]
27
28 extras_require = {
29 'mysql': ['mysqlclient >= 1.2.3'],
30 'pgsql': ['psycopg2 == 2.7.5'],
31
32 # Required for tcms.auth.backends.KerberosBackend
33 'krbauth': [
34 'kerberos == 1.2.5'
35 ],
36
37 # Packages for building documentation
38 'docs': [
39 'Sphinx >= 1.1.2',
40 'sphinx_rtd_theme',
41 ],
42
43 # Necessary packages for running tests
44 'tests': [
45 'beautifulsoup4',
46 'coverage',
47 'factory_boy',
48 'flake8',
49 'mock',
50 'pytest < 4.2.0',
51 'pytest-cov',
52 'pytest-django',
53 ],
54
55 # Contain tools that assists the development
56 'devtools': [
57 'django-debug-toolbar == 1.7',
58 'tox',
59 'django-extensions',
60 'pygraphviz',
61 'future-breakpoint',
62 ],
63
64 # Required packages required to run async tasks
65 'async': [
66 'celery == 4.2.0',
67 ],
68
69 'multiauth': [
70 'social-auth-app-django == 3.1.0',
71 ]
72 }
73
74 setup(
75 name='Nitrate',
76 version=pkg_version,
77 description='Test Case Management System',
78 long_description=get_long_description(),
79 author='Nitrate Team',
80 maintainer='Chenxiong Qi',
81 maintainer_email='[email protected]',
82 url='https://github.com/Nitrate/Nitrate/',
83 license='GPLv2+',
84 keywords='test case',
85 install_requires=install_requires,
86 extras_require=extras_require,
87 python_requires='>=3.6',
88 package_dir={'': 'src'},
89 packages=find_packages('src', exclude=['test*']),
90 include_package_data=True,
91 zip_safe=False,
92 classifiers=[
93 'Framework :: Django',
94 'Framework :: Django :: 2.0',
95 'Framework :: Django :: 2.1',
96 'Intended Audience :: Developers',
97 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
98 'Programming Language :: Python :: 3',
99 'Programming Language :: Python :: 3.6',
100 'Programming Language :: Python :: 3.7',
101 'Programming Language :: Python :: 3 :: Only',
102 'Topic :: Software Development :: Quality Assurance',
103 'Topic :: Software Development :: Testing',
104 ],
105 project_urls={
106 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
107 'Source Code': 'https://github.com/Nitrate/Nitrate',
108 'Documentation': 'https://nitrate.readthedocs.io/',
109 },
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -93,6 +93,7 @@
'Framework :: Django',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
+ 'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 3',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -93,6 +93,7 @@\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n+ 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n", "issue": "Add Django 2.2.x\nAdd a testenv in tox.ini for Django 2.2.\r\nAdd \"Framework :: Django :: 2.2\" to ``setup.py``.\r\nUpdate README to add version `2.2`.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 
'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
| 1,319 | 111 |
gh_patches_debug_14972 | rasdani/github-patches | git_diff | kserve__kserve-1349 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Test] Fix lightgbm test in new test-infra
During the process I helped kfserving migrate to new test-infra, noticed that lightgbm failed every time, and to mitigate the issue, I disabled it in the pytest.
I'm working on the lightgbm test cases, and see if I can reproduce locally.
Need to spend time and dig into it
Detailed can be found https://github.com/kubeflow/kfserving/pull/1329#issuecomment-770325947
/cc @yuzisun
/assign
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/lgbserver/setup.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-asyncio',
20 'pytest-tornasync',
21 'mypy'
22 ]
23
24 setup(
25 name='lgbserver',
26 version='0.1.0',
27 author_email='[email protected]',
28 license='../../LICENSE.txt',
29 url='https://github.com/kubeflow/kfserving/python/kfserving/lgbserver',
30 description='Model Server implementation for LightGBM. \
31 Not intended for use outside KFServing Frameworks Images',
32 long_description=open('README.md').read(),
33 python_requires='>3.4',
34 packages=find_packages("lgbserver"),
35 install_requires=[
36 "kfserving>=0.4.0",
37 "lightgbm == 2.3.1",
38 "pandas == 0.25.3",
39 "argparse >= 1.4.0"
40 ],
41 tests_require=tests_require,
42 extras_require={'test': tests_require}
43 )
44
```
Path: `python/aixexplainer/setup.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22 setup(
23 name='aixserver',
24 version='0.2.1',
25 author_email='[email protected]',
26 license='https://github.com/kubeflow/kfserving/LICENSE',
27 url='https://github.com/kubeflow/kfserving/python/aixserver',
28 description='Model Server implementation for AI eXplainability with LIME. \
29 Not intended for use outside KFServing Frameworks Images',
30 long_description=open('README.md').read(),
31 python_requires='>3.4',
32 packages=find_packages("aixserver"),
33 install_requires=[
34 "kfserving>=0.5.0",
35 "argparse >= 1.4.0",
36 "aix360 >= 0.1.0",
37 "lime >= 0.1.1.37",
38 "nest_asyncio>=1.4.0"
39 ],
40 tests_require=tests_require,
41 extras_require={'test': tests_require}
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/aixexplainer/setup.py b/python/aixexplainer/setup.py
--- a/python/aixexplainer/setup.py
+++ b/python/aixexplainer/setup.py
@@ -35,7 +35,8 @@
"argparse >= 1.4.0",
"aix360 >= 0.1.0",
"lime >= 0.1.1.37",
- "nest_asyncio>=1.4.0"
+ "nest_asyncio>=1.4.0",
+ "cvxpy == 1.1.7"
],
tests_require=tests_require,
extras_require={'test': tests_require}
diff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py
--- a/python/lgbserver/setup.py
+++ b/python/lgbserver/setup.py
@@ -36,7 +36,8 @@
"kfserving>=0.4.0",
"lightgbm == 2.3.1",
"pandas == 0.25.3",
- "argparse >= 1.4.0"
+ "argparse >= 1.4.0",
+ "numpy == 1.19.5",
],
tests_require=tests_require,
extras_require={'test': tests_require}
|
{"golden_diff": "diff --git a/python/aixexplainer/setup.py b/python/aixexplainer/setup.py\n--- a/python/aixexplainer/setup.py\n+++ b/python/aixexplainer/setup.py\n@@ -35,7 +35,8 @@\n \"argparse >= 1.4.0\",\n \"aix360 >= 0.1.0\",\n \"lime >= 0.1.1.37\",\n- \"nest_asyncio>=1.4.0\"\n+ \"nest_asyncio>=1.4.0\",\n+ \"cvxpy == 1.1.7\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\ndiff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py\n--- a/python/lgbserver/setup.py\n+++ b/python/lgbserver/setup.py\n@@ -36,7 +36,8 @@\n \"kfserving>=0.4.0\",\n \"lightgbm == 2.3.1\",\n \"pandas == 0.25.3\",\n- \"argparse >= 1.4.0\"\n+ \"argparse >= 1.4.0\",\n+ \"numpy == 1.19.5\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n", "issue": "[Test] Fix lightgbm test in new test-infra\nDuring the process I helped kfserving migrate to new test-infra, noticed that lightgbm failed every time, and to mitigate the issue, I disabled it in the pytest.\r\n\r\nI'm working on the lightgbm test cases, and see if I can reproduce locally. \r\n\r\nNeed to spend time and dig into it \r\n\r\nDetailed can be found https://github.com/kubeflow/kfserving/pull/1329#issuecomment-770325947\r\n\r\n/cc @yuzisun \r\n/assign \n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/lgbserver',\n description='Model Server implementation for LightGBM. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kfserving>=0.4.0\",\n \"lightgbm == 2.3.1\",\n \"pandas == 0.25.3\",\n \"argparse >= 1.4.0\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\nsetup(\n name='aixserver',\n version='0.2.1',\n author_email='[email protected]',\n license='https://github.com/kubeflow/kfserving/LICENSE',\n url='https://github.com/kubeflow/kfserving/python/aixserver',\n description='Model Server implementation for AI eXplainability with LIME. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"aixserver\"),\n install_requires=[\n \"kfserving>=0.5.0\",\n \"argparse >= 1.4.0\",\n \"aix360 >= 0.1.0\",\n \"lime >= 0.1.1.37\",\n \"nest_asyncio>=1.4.0\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/aixexplainer/setup.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/lgbserver',\n description='Model Server implementation for LightGBM. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kfserving>=0.4.0\",\n \"lightgbm == 2.3.1\",\n \"pandas == 0.25.3\",\n \"argparse >= 1.4.0\",\n \"numpy == 1.19.5\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\nsetup(\n name='aixserver',\n version='0.2.1',\n author_email='[email protected]',\n license='https://github.com/kubeflow/kfserving/LICENSE',\n url='https://github.com/kubeflow/kfserving/python/aixserver',\n description='Model Server implementation for AI eXplainability with LIME. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"aixserver\"),\n install_requires=[\n \"kfserving>=0.5.0\",\n \"argparse >= 1.4.0\",\n \"aix360 >= 0.1.0\",\n \"lime >= 0.1.1.37\",\n \"nest_asyncio>=1.4.0\",\n \"cvxpy == 1.1.7\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/aixexplainer/setup.py"}]}
| 1,291 | 294 |
gh_patches_debug_13923 | rasdani/github-patches | git_diff | litestar-org__litestar-1791 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enhancement: Only run Sonar & Snyk on forks
### Summary
When forking the repo, tests are brought with it but Snyk & Sonar CI fails because the secrets are not set.
### Basic Example
Add ` && github.repository_owner == 'starlite-api'` to the if-check in `ci.yaml` to prevent these two CI items from running on non-upstream repos.
### Drawbacks and Impact
_No response_
### Unresolved questions
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/types/composite_types.py`
Content:
```
1 from functools import partial
2 from os import PathLike
3 from pathlib import Path
4 from typing import (
5 TYPE_CHECKING,
6 Any,
7 AsyncIterable,
8 AsyncIterator,
9 Callable,
10 Dict,
11 Iterable,
12 Iterator,
13 Literal,
14 Mapping,
15 Sequence,
16 Set,
17 Tuple,
18 Type,
19 TypeVar,
20 Union,
21 )
22
23 from litestar.enums import ScopeType
24
25 from .asgi_types import ASGIApp
26 from .callable_types import AnyCallable, ExceptionHandler
27
28 if TYPE_CHECKING:
29 from litestar.datastructures.cookie import Cookie
30 from litestar.datastructures.response_header import ResponseHeader
31 from litestar.di import Provide
32 from litestar.middleware.base import DefineMiddleware, MiddlewareProtocol
33 from litestar.params import ParameterKwarg
34 else:
35 BaseHTTPMiddleware = Any
36 Cookie = Any
37 DefineMiddleware = Any
38 ImmutableState = Any
39 MiddlewareProtocol = Any
40 ParameterKwarg = Any
41 Provide = Any
42 ResponseHeader = Any
43
44 T = TypeVar("T")
45
46
47 Dependencies = Mapping[str, Union[Provide, AnyCallable]]
48 ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]
49 MaybePartial = Union[T, partial]
50 Middleware = Union[
51 Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]
52 ]
53 ParametersMap = Mapping[str, ParameterKwarg]
54 PathType = Union[Path, PathLike, str]
55 ResponseCookies = Union[Sequence[Cookie], Mapping[str, str]]
56 ResponseHeaders = Union[Sequence[ResponseHeader], Mapping[str, str]]
57 Scopes = Set[Literal[ScopeType.HTTP, ScopeType.WEBSOCKET]]
58 StreamType = Union[Iterable[T], Iterator[T], AsyncIterable[T], AsyncIterator[T]]
59 TypeEncodersMap = Mapping[Any, Callable[[Any], Any]]
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/litestar/types/composite_types.py b/litestar/types/composite_types.py
--- a/litestar/types/composite_types.py
+++ b/litestar/types/composite_types.py
@@ -12,6 +12,7 @@
Iterator,
Literal,
Mapping,
+ MutableMapping,
Sequence,
Set,
Tuple,
@@ -45,7 +46,7 @@
Dependencies = Mapping[str, Union[Provide, AnyCallable]]
-ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]
+ExceptionHandlersMap = MutableMapping[Union[int, Type[Exception]], ExceptionHandler]
MaybePartial = Union[T, partial]
Middleware = Union[
Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]
|
{"golden_diff": "diff --git a/litestar/types/composite_types.py b/litestar/types/composite_types.py\n--- a/litestar/types/composite_types.py\n+++ b/litestar/types/composite_types.py\n@@ -12,6 +12,7 @@\n Iterator,\n Literal,\n Mapping,\n+ MutableMapping,\n Sequence,\n Set,\n Tuple,\n@@ -45,7 +46,7 @@\n \n \n Dependencies = Mapping[str, Union[Provide, AnyCallable]]\n-ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]\n+ExceptionHandlersMap = MutableMapping[Union[int, Type[Exception]], ExceptionHandler]\n MaybePartial = Union[T, partial]\n Middleware = Union[\n Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]\n", "issue": "Enhancement: Only run Sonar & Snyk on forks\n### Summary\n\nWhen forking the repo, tests are brought with it but Snyk & Sonar CI fails because the secrets are not set.\n\n### Basic Example\n\nAdd ` && github.repository_owner == 'starlite-api'` to the if-check in `ci.yaml` to prevent these two CI items from running on non-upstream repos.\n\n### Drawbacks and Impact\n\n_No response_\n\n### Unresolved questions\n\n_No response_\n", "before_files": [{"content": "from functools import partial\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterable,\n AsyncIterator,\n Callable,\n Dict,\n Iterable,\n Iterator,\n Literal,\n Mapping,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom litestar.enums import ScopeType\n\nfrom .asgi_types import ASGIApp\nfrom .callable_types import AnyCallable, ExceptionHandler\n\nif TYPE_CHECKING:\n from litestar.datastructures.cookie import Cookie\n from litestar.datastructures.response_header import ResponseHeader\n from litestar.di import Provide\n from litestar.middleware.base import DefineMiddleware, MiddlewareProtocol\n from litestar.params import ParameterKwarg\nelse:\n BaseHTTPMiddleware = Any\n Cookie = Any\n DefineMiddleware = Any\n ImmutableState = Any\n MiddlewareProtocol = Any\n ParameterKwarg = Any\n Provide = Any\n ResponseHeader = Any\n\nT = TypeVar(\"T\")\n\n\nDependencies = Mapping[str, Union[Provide, AnyCallable]]\nExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]\nMaybePartial = Union[T, partial]\nMiddleware = Union[\n Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]\n]\nParametersMap = Mapping[str, ParameterKwarg]\nPathType = Union[Path, PathLike, str]\nResponseCookies = Union[Sequence[Cookie], Mapping[str, str]]\nResponseHeaders = Union[Sequence[ResponseHeader], Mapping[str, str]]\nScopes = Set[Literal[ScopeType.HTTP, ScopeType.WEBSOCKET]]\nStreamType = Union[Iterable[T], Iterator[T], AsyncIterable[T], AsyncIterator[T]]\nTypeEncodersMap = Mapping[Any, Callable[[Any], Any]]\n", "path": "litestar/types/composite_types.py"}], "after_files": [{"content": "from functools import partial\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterable,\n AsyncIterator,\n Callable,\n Dict,\n Iterable,\n Iterator,\n Literal,\n Mapping,\n MutableMapping,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom litestar.enums import ScopeType\n\nfrom .asgi_types import ASGIApp\nfrom .callable_types import AnyCallable, ExceptionHandler\n\nif TYPE_CHECKING:\n from litestar.datastructures.cookie import Cookie\n from litestar.datastructures.response_header import ResponseHeader\n from litestar.di import Provide\n from litestar.middleware.base 
import DefineMiddleware, MiddlewareProtocol\n from litestar.params import ParameterKwarg\nelse:\n BaseHTTPMiddleware = Any\n Cookie = Any\n DefineMiddleware = Any\n ImmutableState = Any\n MiddlewareProtocol = Any\n ParameterKwarg = Any\n Provide = Any\n ResponseHeader = Any\n\nT = TypeVar(\"T\")\n\n\nDependencies = Mapping[str, Union[Provide, AnyCallable]]\nExceptionHandlersMap = MutableMapping[Union[int, Type[Exception]], ExceptionHandler]\nMaybePartial = Union[T, partial]\nMiddleware = Union[\n Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]\n]\nParametersMap = Mapping[str, ParameterKwarg]\nPathType = Union[Path, PathLike, str]\nResponseCookies = Union[Sequence[Cookie], Mapping[str, str]]\nResponseHeaders = Union[Sequence[ResponseHeader], Mapping[str, str]]\nScopes = Set[Literal[ScopeType.HTTP, ScopeType.WEBSOCKET]]\nStreamType = Union[Iterable[T], Iterator[T], AsyncIterable[T], AsyncIterator[T]]\nTypeEncodersMap = Mapping[Any, Callable[[Any], Any]]\n", "path": "litestar/types/composite_types.py"}]}
| 872 | 180 |
gh_patches_debug_7610
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1675
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Loss automatically detaching inputs breaks some workflows
## 🐛 Bug description
https://github.com/pytorch/ignite/blob/master/ignite/metrics/loss.py#L60
Before 0.4.3, the `y_pred` and `y` were passed without calling detach. Now because of detach a number of usecases are no longer possible.
An example is: https://docs.gpytorch.ai/en/v1.3.1/marginal_log_likelihoods.html#variationalelbo
The output of `model(train_x)` is an object which does not support detach. This leads to a crash when passing the `mll` function to Loss as a `loss_fn`.
Another example is https://github.com/y0ast/deterministic-uncertainty-quantification/blob/master/train_duq_cifar.py#L153
The loss is dependent on the gradient, which means that by calling detach the loss cannot be computed anymore.
I have been trying to work around it, but I can't figure out a nice way. https://pytorch.org/ignite/metrics.html#ignite.metrics.Average is not input size aware so it cannot correctly compute the average of the loss.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/loss.py`
Content:
```
1 from typing import Callable, Dict, Sequence, Tuple, Union, cast
2
3 import torch
4
5 from ignite.exceptions import NotComputableError
6 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
7
8 __all__ = ["Loss"]
9
10
11 class Loss(Metric):
12 """
13 Calculates the average loss according to the passed loss_fn.
14
15 Args:
16 loss_fn (callable): a callable taking a prediction tensor, a target
17 tensor, optionally other arguments, and returns the average loss
18 over all observations in the batch.
19 output_transform (callable): a callable that is used to transform the
20 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
21 form expected by the metric.
22 This can be useful if, for example, you have a multi-output model and
23 you want to compute the metric with respect to one of the outputs.
24 The output is expected to be a tuple `(prediction, target)` or
25 (prediction, target, kwargs) where kwargs is a dictionary of extra
26 keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.
27 batch_size (callable): a callable taking a target tensor that returns the
28 first dimension size (usually the batch size).
29 device (str or torch.device): specifies which device updates are accumulated on. Setting the
30 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
31 non-blocking. By default, CPU.
32
33 """
34
35 required_output_keys = None
36
37 def __init__(
38 self,
39 loss_fn: Callable,
40 output_transform: Callable = lambda x: x,
41 batch_size: Callable = lambda x: len(x),
42 device: Union[str, torch.device] = torch.device("cpu"),
43 ):
44 super(Loss, self).__init__(output_transform, device=device)
45 self._loss_fn = loss_fn
46 self._batch_size = batch_size
47
48 @reinit__is_reduced
49 def reset(self) -> None:
50 self._sum = torch.tensor(0.0, device=self._device)
51 self._num_examples = 0
52
53 @reinit__is_reduced
54 def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:
55 if len(output) == 2:
56 y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)
57 kwargs = {} # type: Dict
58 else:
59 y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
60 average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)
61
62 if len(average_loss.shape) != 0:
63 raise ValueError("loss_fn did not return the average loss.")
64
65 n = self._batch_size(y)
66 self._sum += average_loss.to(self._device) * n
67 self._num_examples += n
68
69 @sync_all_reduce("_sum", "_num_examples")
70 def compute(self) -> float:
71 if self._num_examples == 0:
72 raise NotComputableError("Loss must have at least one example before it can be computed.")
73 return self._sum.item() / self._num_examples
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -57,7 +57,7 @@
kwargs = {} # type: Dict
else:
y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
- average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)
+ average_loss = self._loss_fn(y_pred, y, **kwargs).detach()
if len(average_loss.shape) != 0:
raise ValueError("loss_fn did not return the average loss.")
|
{"golden_diff": "diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py\n--- a/ignite/metrics/loss.py\n+++ b/ignite/metrics/loss.py\n@@ -57,7 +57,7 @@\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n- average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)\n+ average_loss = self._loss_fn(y_pred, y, **kwargs).detach()\n \n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n", "issue": "Loss automatically detaching inputs breaks some workflows\n## \ud83d\udc1b Bug description\r\n\r\nhttps://github.com/pytorch/ignite/blob/master/ignite/metrics/loss.py#L60\r\n\r\nBefore 0.4.3, the `y_pred` and `y` were passed without calling detach. Now because of detach a number of usecases are no longer possible.\r\n\r\nAn example is: https://docs.gpytorch.ai/en/v1.3.1/marginal_log_likelihoods.html#variationalelbo\r\n\r\nThe output of `model(train_x)` is an object which does not support detach. This leads to a crash when passing the `mll` function to Loss as a `loss_fn`.\r\n\r\nAnother example is https://github.com/y0ast/deterministic-uncertainty-quantification/blob/master/train_duq_cifar.py#L153\r\n\r\nThe loss is dependent on the gradient, which means that by calling detach the loss cannot be computed anymore.\r\n\r\nI have been trying to work around it, but I can't figure out a nice way. https://pytorch.org/ignite/metrics.html#ignite.metrics.Average is not input size aware so it cannot correctly compute the average of the loss.\n", "before_files": [{"content": "from typing import Callable, Dict, Sequence, Tuple, Union, cast\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"Loss\"]\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn (callable): a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform (callable): a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is expected to be a tuple `(prediction, target)` or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.\n batch_size (callable): a callable taking a target tensor that returns the\n first dimension size (usually the batch size).\n device (str or torch.device): specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. 
By default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n loss_fn: Callable,\n output_transform: Callable = lambda x: x,\n batch_size: Callable = lambda x: len(x),\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super(Loss, self).__init__(output_transform, device=device)\n self._loss_fn = loss_fn\n self._batch_size = batch_size\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:\n if len(output) == 2:\n y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)\n\n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n\n n = self._batch_size(y)\n self._sum += average_loss.to(self._device) * n\n self._num_examples += n\n\n @sync_all_reduce(\"_sum\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"Loss must have at least one example before it can be computed.\")\n return self._sum.item() / self._num_examples\n", "path": "ignite/metrics/loss.py"}], "after_files": [{"content": "from typing import Callable, Dict, Sequence, Tuple, Union, cast\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"Loss\"]\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn (callable): a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform (callable): a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is expected to be a tuple `(prediction, target)` or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.\n batch_size (callable): a callable taking a target tensor that returns the\n first dimension size (usually the batch size).\n device (str or torch.device): specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. 
By default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n loss_fn: Callable,\n output_transform: Callable = lambda x: x,\n batch_size: Callable = lambda x: len(x),\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super(Loss, self).__init__(output_transform, device=device)\n self._loss_fn = loss_fn\n self._batch_size = batch_size\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:\n if len(output) == 2:\n y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n average_loss = self._loss_fn(y_pred, y, **kwargs).detach()\n\n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n\n n = self._batch_size(y)\n self._sum += average_loss.to(self._device) * n\n self._num_examples += n\n\n @sync_all_reduce(\"_sum\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"Loss must have at least one example before it can be computed.\")\n return self._sum.item() / self._num_examples\n", "path": "ignite/metrics/loss.py"}]}
| 1,343 | 151 |
gh_patches_debug_24221
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection-4928
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
crpn_faster_rcnn gets "No gt bboxes" error
Hello,
I have been trying to run crpn_faster_rcnn but the training always fails with "No gt bboxes" error.
I am using custom models, but the model does have region proposals, as I am loading the weights from pre-trained weights for a crpn I created from the same dataset.
I've played around with the learning rate, but no luck.
Do you have any hint or advice on what is going on? Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/core/bbox/assigners/region_assigner.py`
Content:
```
1 import torch
2
3 from mmdet.core import anchor_inside_flags
4 from ..builder import BBOX_ASSIGNERS
5 from .assign_result import AssignResult
6 from .base_assigner import BaseAssigner
7
8
9 def calc_region(bbox, ratio, stride, featmap_size=None):
10 """Calculate region of the box defined by the ratio, the ratio is from the
11 center of the box to every edge."""
12 # project bbox on the feature
13 f_bbox = bbox / stride
14 x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
15 y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
16 x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
17 y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
18 if featmap_size is not None:
19 x1 = x1.clamp(min=0, max=featmap_size[1])
20 y1 = y1.clamp(min=0, max=featmap_size[0])
21 x2 = x2.clamp(min=0, max=featmap_size[1])
22 y2 = y2.clamp(min=0, max=featmap_size[0])
23 return (x1, y1, x2, y2)
24
25
26 def anchor_ctr_inside_region_flags(anchors, stride, region):
27 """Get the flag indicate whether anchor centers are inside regions."""
28 x1, y1, x2, y2 = region
29 f_anchors = anchors / stride
30 x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
31 y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
32 flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
33 return flags
34
35
36 @BBOX_ASSIGNERS.register_module()
37 class RegionAssigner(BaseAssigner):
38 """Assign a corresponding gt bbox or background to each bbox.
39
40 Each proposals will be assigned with `-1`, `0`, or a positive integer
41 indicating the ground truth index.
42
43 - -1: don't care
44 - 0: negative sample, no assigned gt
45 - positive integer: positive sample, index (1-based) of assigned gt
46
47 Args:
48 center_ratio: ratio of the region in the center of the bbox to
49 define positive sample.
50 ignore_ratio: ratio of the region to define ignore samples.
51 """
52
53 def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
54 self.center_ratio = center_ratio
55 self.ignore_ratio = ignore_ratio
56
57 def assign(self,
58 mlvl_anchors,
59 mlvl_valid_flags,
60 gt_bboxes,
61 img_meta,
62 featmap_sizes,
63 anchor_scale,
64 anchor_strides,
65 gt_bboxes_ignore=None,
66 gt_labels=None,
67 allowed_border=0):
68 """Assign gt to anchors.
69
70 This method assign a gt bbox to every bbox (proposal/anchor), each bbox
71 will be assigned with -1, 0, or a positive number. -1 means don't care,
72 0 means negative sample, positive number is the index (1-based) of
73 assigned gt.
74 The assignment is done in following steps, the order matters.
75
76 1. Assign every anchor to 0 (negative)
77 For each gt_bboxes:
78 2. Compute ignore flags based on ignore_region then
79 assign -1 to anchors w.r.t. ignore flags
80 3. Compute pos flags based on center_region then
81 assign gt_bboxes to anchors w.r.t. pos flags
82 4. Compute ignore flags based on adjacent anchor lvl then
83 assign -1 to anchors w.r.t. ignore flags
84 5. Assign anchor outside of image to -1
85
86 Args:
87 mlvl_anchors (list[Tensor]): Multi level anchors.
88 mlvl_valid_flags (list[Tensor]): Multi level valid flags.
89 gt_bboxes (Tensor): Ground truth bboxes of image
90 img_meta (dict): Meta info of image.
91 featmap_sizes (list[Tensor]): Feature mapsize each level
92 anchor_scale (int): Scale of the anchor.
93 anchor_strides (list[int]): Stride of the anchor.
94 gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
95 gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
96 labelled as `ignored`, e.g., crowd boxes in COCO.
97 gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
98 allowed_border (int, optional): The border to allow the valid
99 anchor. Defaults to 0.
100
101 Returns:
102 :obj:`AssignResult`: The assign result.
103 """
104 # TODO support gt_bboxes_ignore
105 if gt_bboxes_ignore is not None:
106 raise NotImplementedError
107 if gt_bboxes.shape[0] == 0:
108 raise ValueError('No gt bboxes')
109 num_gts = gt_bboxes.shape[0]
110 num_lvls = len(mlvl_anchors)
111 r1 = (1 - self.center_ratio) / 2
112 r2 = (1 - self.ignore_ratio) / 2
113
114 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
115 (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
116 min_anchor_size = scale.new_full(
117 (1, ), float(anchor_scale * anchor_strides[0]))
118 target_lvls = torch.floor(
119 torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
120 target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
121
122 # 1. assign 0 (negative) by default
123 mlvl_assigned_gt_inds = []
124 mlvl_ignore_flags = []
125 for lvl in range(num_lvls):
126 h, w = featmap_sizes[lvl]
127 assert h * w == mlvl_anchors[lvl].shape[0]
128 assigned_gt_inds = gt_bboxes.new_full((h * w, ),
129 0,
130 dtype=torch.long)
131 ignore_flags = torch.zeros_like(assigned_gt_inds)
132 mlvl_assigned_gt_inds.append(assigned_gt_inds)
133 mlvl_ignore_flags.append(ignore_flags)
134
135 for gt_id in range(num_gts):
136 lvl = target_lvls[gt_id].item()
137 featmap_size = featmap_sizes[lvl]
138 stride = anchor_strides[lvl]
139 anchors = mlvl_anchors[lvl]
140 gt_bbox = gt_bboxes[gt_id, :4]
141
142 # Compute regions
143 ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
144 ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)
145
146 # 2. Assign -1 to ignore flags
147 ignore_flags = anchor_ctr_inside_region_flags(
148 anchors, stride, ignore_region)
149 mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
150
151 # 3. Assign gt_bboxes to pos flags
152 pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
153 ctr_region)
154 mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1
155
156 # 4. Assign -1 to ignore adjacent lvl
157 if lvl > 0:
158 d_lvl = lvl - 1
159 d_anchors = mlvl_anchors[d_lvl]
160 d_featmap_size = featmap_sizes[d_lvl]
161 d_stride = anchor_strides[d_lvl]
162 d_ignore_region = calc_region(gt_bbox, r2, d_stride,
163 d_featmap_size)
164 ignore_flags = anchor_ctr_inside_region_flags(
165 d_anchors, d_stride, d_ignore_region)
166 mlvl_ignore_flags[d_lvl][ignore_flags] = 1
167 if lvl < num_lvls - 1:
168 u_lvl = lvl + 1
169 u_anchors = mlvl_anchors[u_lvl]
170 u_featmap_size = featmap_sizes[u_lvl]
171 u_stride = anchor_strides[u_lvl]
172 u_ignore_region = calc_region(gt_bbox, r2, u_stride,
173 u_featmap_size)
174 ignore_flags = anchor_ctr_inside_region_flags(
175 u_anchors, u_stride, u_ignore_region)
176 mlvl_ignore_flags[u_lvl][ignore_flags] = 1
177
178 # 4. (cont.) Assign -1 to ignore adjacent lvl
179 for lvl in range(num_lvls):
180 ignore_flags = mlvl_ignore_flags[lvl]
181 mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
182
183 # 5. Assign -1 to anchor outside of image
184 flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
185 flat_anchors = torch.cat(mlvl_anchors)
186 flat_valid_flags = torch.cat(mlvl_valid_flags)
187 assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
188 flat_valid_flags.shape[0])
189 inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,
190 img_meta['img_shape'],
191 allowed_border)
192 outside_flags = ~inside_flags
193 flat_assigned_gt_inds[outside_flags] = -1
194
195 if gt_labels is not None:
196 assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
197 pos_flags = assigned_gt_inds > 0
198 assigned_labels[pos_flags] = gt_labels[
199 flat_assigned_gt_inds[pos_flags] - 1]
200 else:
201 assigned_labels = None
202
203 return AssignResult(
204 num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmdet/core/bbox/assigners/region_assigner.py b/mmdet/core/bbox/assigners/region_assigner.py
--- a/mmdet/core/bbox/assigners/region_assigner.py
+++ b/mmdet/core/bbox/assigners/region_assigner.py
@@ -101,12 +101,29 @@
Returns:
:obj:`AssignResult`: The assign result.
"""
- # TODO support gt_bboxes_ignore
if gt_bboxes_ignore is not None:
raise NotImplementedError
- if gt_bboxes.shape[0] == 0:
- raise ValueError('No gt bboxes')
+
num_gts = gt_bboxes.shape[0]
+ num_bboxes = sum(x.shape[0] for x in mlvl_anchors)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))
+ assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),
+ dtype=torch.long)
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = gt_bboxes.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
num_lvls = len(mlvl_anchors)
r1 = (1 - self.center_ratio) / 2
r2 = (1 - self.ignore_ratio) / 2
|
{"golden_diff": "diff --git a/mmdet/core/bbox/assigners/region_assigner.py b/mmdet/core/bbox/assigners/region_assigner.py\n--- a/mmdet/core/bbox/assigners/region_assigner.py\n+++ b/mmdet/core/bbox/assigners/region_assigner.py\n@@ -101,12 +101,29 @@\n Returns:\n :obj:`AssignResult`: The assign result.\n \"\"\"\n- # TODO support gt_bboxes_ignore\n if gt_bboxes_ignore is not None:\n raise NotImplementedError\n- if gt_bboxes.shape[0] == 0:\n- raise ValueError('No gt bboxes')\n+\n num_gts = gt_bboxes.shape[0]\n+ num_bboxes = sum(x.shape[0] for x in mlvl_anchors)\n+\n+ if num_gts == 0 or num_bboxes == 0:\n+ # No ground truth or boxes, return empty assignment\n+ max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))\n+ assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),\n+ dtype=torch.long)\n+ if gt_labels is None:\n+ assigned_labels = None\n+ else:\n+ assigned_labels = gt_bboxes.new_full((num_bboxes, ),\n+ -1,\n+ dtype=torch.long)\n+ return AssignResult(\n+ num_gts,\n+ assigned_gt_inds,\n+ max_overlaps,\n+ labels=assigned_labels)\n+\n num_lvls = len(mlvl_anchors)\n r1 = (1 - self.center_ratio) / 2\n r2 = (1 - self.ignore_ratio) / 2\n", "issue": "crpn_faster_rcnn gets \"No gt bboxes\" error\nHello, \r\n\r\nI have been trying to run crpn_faster_rcnn but the training always fails with \"No gt bboxes\" error. \r\n\r\nI am using custom models but the model does have a region proposals as I am loading the weights from pre-trained weights for crpn I created from the same dataset. \r\n\r\nI've played around with the learning rate but no chance.\r\n\r\nDo you have any hint or advice on what is going on? Thanks.\n", "before_files": [{"content": "import torch\n\nfrom mmdet.core import anchor_inside_flags\nfrom ..builder import BBOX_ASSIGNERS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef calc_region(bbox, ratio, stride, featmap_size=None):\n \"\"\"Calculate region of the box defined by the ratio, the ratio is from the\n center of the box to every edge.\"\"\"\n # project bbox on the feature\n f_bbox = bbox / stride\n x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])\n y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])\n x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])\n y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])\n if featmap_size is not None:\n x1 = x1.clamp(min=0, max=featmap_size[1])\n y1 = y1.clamp(min=0, max=featmap_size[0])\n x2 = x2.clamp(min=0, max=featmap_size[1])\n y2 = y2.clamp(min=0, max=featmap_size[0])\n return (x1, y1, x2, y2)\n\n\ndef anchor_ctr_inside_region_flags(anchors, stride, region):\n \"\"\"Get the flag indicate whether anchor centers are inside regions.\"\"\"\n x1, y1, x2, y2 = region\n f_anchors = anchors / stride\n x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5\n y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5\n flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)\n return flags\n\n\n@BBOX_ASSIGNERS.register_module()\nclass RegionAssigner(BaseAssigner):\n \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n Each proposals will be assigned with `-1`, `0`, or a positive integer\n indicating the ground truth index.\n\n - -1: don't care\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n center_ratio: ratio of the region in the center of the bbox to\n define positive sample.\n ignore_ratio: ratio of the region to define ignore samples.\n \"\"\"\n\n def __init__(self, center_ratio=0.2, ignore_ratio=0.5):\n 
self.center_ratio = center_ratio\n self.ignore_ratio = ignore_ratio\n\n def assign(self,\n mlvl_anchors,\n mlvl_valid_flags,\n gt_bboxes,\n img_meta,\n featmap_sizes,\n anchor_scale,\n anchor_strides,\n gt_bboxes_ignore=None,\n gt_labels=None,\n allowed_border=0):\n \"\"\"Assign gt to anchors.\n\n This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n will be assigned with -1, 0, or a positive number. -1 means don't care,\n 0 means negative sample, positive number is the index (1-based) of\n assigned gt.\n The assignment is done in following steps, the order matters.\n\n 1. Assign every anchor to 0 (negative)\n For each gt_bboxes:\n 2. Compute ignore flags based on ignore_region then\n assign -1 to anchors w.r.t. ignore flags\n 3. Compute pos flags based on center_region then\n assign gt_bboxes to anchors w.r.t. pos flags\n 4. Compute ignore flags based on adjacent anchor lvl then\n assign -1 to anchors w.r.t. ignore flags\n 5. Assign anchor outside of image to -1\n\n Args:\n mlvl_anchors (list[Tensor]): Multi level anchors.\n mlvl_valid_flags (list[Tensor]): Multi level valid flags.\n gt_bboxes (Tensor): Ground truth bboxes of image\n img_meta (dict): Meta info of image.\n featmap_sizes (list[Tensor]): Feature mapsize each level\n anchor_scale (int): Scale of the anchor.\n anchor_strides (list[int]): Stride of the anchor.\n gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`, e.g., crowd boxes in COCO.\n gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n allowed_border (int, optional): The border to allow the valid\n anchor. Defaults to 0.\n\n Returns:\n :obj:`AssignResult`: The assign result.\n \"\"\"\n # TODO support gt_bboxes_ignore\n if gt_bboxes_ignore is not None:\n raise NotImplementedError\n if gt_bboxes.shape[0] == 0:\n raise ValueError('No gt bboxes')\n num_gts = gt_bboxes.shape[0]\n num_lvls = len(mlvl_anchors)\n r1 = (1 - self.center_ratio) / 2\n r2 = (1 - self.ignore_ratio) / 2\n\n scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n min_anchor_size = scale.new_full(\n (1, ), float(anchor_scale * anchor_strides[0]))\n target_lvls = torch.floor(\n torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n\n # 1. assign 0 (negative) by default\n mlvl_assigned_gt_inds = []\n mlvl_ignore_flags = []\n for lvl in range(num_lvls):\n h, w = featmap_sizes[lvl]\n assert h * w == mlvl_anchors[lvl].shape[0]\n assigned_gt_inds = gt_bboxes.new_full((h * w, ),\n 0,\n dtype=torch.long)\n ignore_flags = torch.zeros_like(assigned_gt_inds)\n mlvl_assigned_gt_inds.append(assigned_gt_inds)\n mlvl_ignore_flags.append(ignore_flags)\n\n for gt_id in range(num_gts):\n lvl = target_lvls[gt_id].item()\n featmap_size = featmap_sizes[lvl]\n stride = anchor_strides[lvl]\n anchors = mlvl_anchors[lvl]\n gt_bbox = gt_bboxes[gt_id, :4]\n\n # Compute regions\n ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)\n ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)\n\n # 2. Assign -1 to ignore flags\n ignore_flags = anchor_ctr_inside_region_flags(\n anchors, stride, ignore_region)\n mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n # 3. Assign gt_bboxes to pos flags\n pos_flags = anchor_ctr_inside_region_flags(anchors, stride,\n ctr_region)\n mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1\n\n # 4. 
Assign -1 to ignore adjacent lvl\n if lvl > 0:\n d_lvl = lvl - 1\n d_anchors = mlvl_anchors[d_lvl]\n d_featmap_size = featmap_sizes[d_lvl]\n d_stride = anchor_strides[d_lvl]\n d_ignore_region = calc_region(gt_bbox, r2, d_stride,\n d_featmap_size)\n ignore_flags = anchor_ctr_inside_region_flags(\n d_anchors, d_stride, d_ignore_region)\n mlvl_ignore_flags[d_lvl][ignore_flags] = 1\n if lvl < num_lvls - 1:\n u_lvl = lvl + 1\n u_anchors = mlvl_anchors[u_lvl]\n u_featmap_size = featmap_sizes[u_lvl]\n u_stride = anchor_strides[u_lvl]\n u_ignore_region = calc_region(gt_bbox, r2, u_stride,\n u_featmap_size)\n ignore_flags = anchor_ctr_inside_region_flags(\n u_anchors, u_stride, u_ignore_region)\n mlvl_ignore_flags[u_lvl][ignore_flags] = 1\n\n # 4. (cont.) Assign -1 to ignore adjacent lvl\n for lvl in range(num_lvls):\n ignore_flags = mlvl_ignore_flags[lvl]\n mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n # 5. Assign -1 to anchor outside of image\n flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)\n flat_anchors = torch.cat(mlvl_anchors)\n flat_valid_flags = torch.cat(mlvl_valid_flags)\n assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==\n flat_valid_flags.shape[0])\n inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,\n img_meta['img_shape'],\n allowed_border)\n outside_flags = ~inside_flags\n flat_assigned_gt_inds[outside_flags] = -1\n\n if gt_labels is not None:\n assigned_labels = torch.zeros_like(flat_assigned_gt_inds)\n pos_flags = assigned_gt_inds > 0\n assigned_labels[pos_flags] = gt_labels[\n flat_assigned_gt_inds[pos_flags] - 1]\n else:\n assigned_labels = None\n\n return AssignResult(\n num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)\n", "path": "mmdet/core/bbox/assigners/region_assigner.py"}], "after_files": [{"content": "import torch\n\nfrom mmdet.core import anchor_inside_flags\nfrom ..builder import BBOX_ASSIGNERS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef calc_region(bbox, ratio, stride, featmap_size=None):\n \"\"\"Calculate region of the box defined by the ratio, the ratio is from the\n center of the box to every edge.\"\"\"\n # project bbox on the feature\n f_bbox = bbox / stride\n x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])\n y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])\n x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])\n y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])\n if featmap_size is not None:\n x1 = x1.clamp(min=0, max=featmap_size[1])\n y1 = y1.clamp(min=0, max=featmap_size[0])\n x2 = x2.clamp(min=0, max=featmap_size[1])\n y2 = y2.clamp(min=0, max=featmap_size[0])\n return (x1, y1, x2, y2)\n\n\ndef anchor_ctr_inside_region_flags(anchors, stride, region):\n \"\"\"Get the flag indicate whether anchor centers are inside regions.\"\"\"\n x1, y1, x2, y2 = region\n f_anchors = anchors / stride\n x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5\n y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5\n flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)\n return flags\n\n\n@BBOX_ASSIGNERS.register_module()\nclass RegionAssigner(BaseAssigner):\n \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n Each proposals will be assigned with `-1`, `0`, or a positive integer\n indicating the ground truth index.\n\n - -1: don't care\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n center_ratio: ratio of the region in the center of the bbox to\n define 
positive sample.\n ignore_ratio: ratio of the region to define ignore samples.\n \"\"\"\n\n def __init__(self, center_ratio=0.2, ignore_ratio=0.5):\n self.center_ratio = center_ratio\n self.ignore_ratio = ignore_ratio\n\n def assign(self,\n mlvl_anchors,\n mlvl_valid_flags,\n gt_bboxes,\n img_meta,\n featmap_sizes,\n anchor_scale,\n anchor_strides,\n gt_bboxes_ignore=None,\n gt_labels=None,\n allowed_border=0):\n \"\"\"Assign gt to anchors.\n\n This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n will be assigned with -1, 0, or a positive number. -1 means don't care,\n 0 means negative sample, positive number is the index (1-based) of\n assigned gt.\n The assignment is done in following steps, the order matters.\n\n 1. Assign every anchor to 0 (negative)\n For each gt_bboxes:\n 2. Compute ignore flags based on ignore_region then\n assign -1 to anchors w.r.t. ignore flags\n 3. Compute pos flags based on center_region then\n assign gt_bboxes to anchors w.r.t. pos flags\n 4. Compute ignore flags based on adjacent anchor lvl then\n assign -1 to anchors w.r.t. ignore flags\n 5. Assign anchor outside of image to -1\n\n Args:\n mlvl_anchors (list[Tensor]): Multi level anchors.\n mlvl_valid_flags (list[Tensor]): Multi level valid flags.\n gt_bboxes (Tensor): Ground truth bboxes of image\n img_meta (dict): Meta info of image.\n featmap_sizes (list[Tensor]): Feature mapsize each level\n anchor_scale (int): Scale of the anchor.\n anchor_strides (list[int]): Stride of the anchor.\n gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`, e.g., crowd boxes in COCO.\n gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n allowed_border (int, optional): The border to allow the valid\n anchor. Defaults to 0.\n\n Returns:\n :obj:`AssignResult`: The assign result.\n \"\"\"\n if gt_bboxes_ignore is not None:\n raise NotImplementedError\n\n num_gts = gt_bboxes.shape[0]\n num_bboxes = sum(x.shape[0] for x in mlvl_anchors)\n\n if num_gts == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))\n assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),\n dtype=torch.long)\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = gt_bboxes.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gts,\n assigned_gt_inds,\n max_overlaps,\n labels=assigned_labels)\n\n num_lvls = len(mlvl_anchors)\n r1 = (1 - self.center_ratio) / 2\n r2 = (1 - self.ignore_ratio) / 2\n\n scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n min_anchor_size = scale.new_full(\n (1, ), float(anchor_scale * anchor_strides[0]))\n target_lvls = torch.floor(\n torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n\n # 1. 
assign 0 (negative) by default\n mlvl_assigned_gt_inds = []\n mlvl_ignore_flags = []\n for lvl in range(num_lvls):\n h, w = featmap_sizes[lvl]\n assert h * w == mlvl_anchors[lvl].shape[0]\n assigned_gt_inds = gt_bboxes.new_full((h * w, ),\n 0,\n dtype=torch.long)\n ignore_flags = torch.zeros_like(assigned_gt_inds)\n mlvl_assigned_gt_inds.append(assigned_gt_inds)\n mlvl_ignore_flags.append(ignore_flags)\n\n for gt_id in range(num_gts):\n lvl = target_lvls[gt_id].item()\n featmap_size = featmap_sizes[lvl]\n stride = anchor_strides[lvl]\n anchors = mlvl_anchors[lvl]\n gt_bbox = gt_bboxes[gt_id, :4]\n\n # Compute regions\n ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)\n ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)\n\n # 2. Assign -1 to ignore flags\n ignore_flags = anchor_ctr_inside_region_flags(\n anchors, stride, ignore_region)\n mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n # 3. Assign gt_bboxes to pos flags\n pos_flags = anchor_ctr_inside_region_flags(anchors, stride,\n ctr_region)\n mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1\n\n # 4. Assign -1 to ignore adjacent lvl\n if lvl > 0:\n d_lvl = lvl - 1\n d_anchors = mlvl_anchors[d_lvl]\n d_featmap_size = featmap_sizes[d_lvl]\n d_stride = anchor_strides[d_lvl]\n d_ignore_region = calc_region(gt_bbox, r2, d_stride,\n d_featmap_size)\n ignore_flags = anchor_ctr_inside_region_flags(\n d_anchors, d_stride, d_ignore_region)\n mlvl_ignore_flags[d_lvl][ignore_flags] = 1\n if lvl < num_lvls - 1:\n u_lvl = lvl + 1\n u_anchors = mlvl_anchors[u_lvl]\n u_featmap_size = featmap_sizes[u_lvl]\n u_stride = anchor_strides[u_lvl]\n u_ignore_region = calc_region(gt_bbox, r2, u_stride,\n u_featmap_size)\n ignore_flags = anchor_ctr_inside_region_flags(\n u_anchors, u_stride, u_ignore_region)\n mlvl_ignore_flags[u_lvl][ignore_flags] = 1\n\n # 4. (cont.) Assign -1 to ignore adjacent lvl\n for lvl in range(num_lvls):\n ignore_flags = mlvl_ignore_flags[lvl]\n mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n # 5. Assign -1 to anchor outside of image\n flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)\n flat_anchors = torch.cat(mlvl_anchors)\n flat_valid_flags = torch.cat(mlvl_valid_flags)\n assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==\n flat_valid_flags.shape[0])\n inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,\n img_meta['img_shape'],\n allowed_border)\n outside_flags = ~inside_flags\n flat_assigned_gt_inds[outside_flags] = -1\n\n if gt_labels is not None:\n assigned_labels = torch.zeros_like(flat_assigned_gt_inds)\n pos_flags = assigned_gt_inds > 0\n assigned_labels[pos_flags] = gt_labels[\n flat_assigned_gt_inds[pos_flags] - 1]\n else:\n assigned_labels = None\n\n return AssignResult(\n num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)\n", "path": "mmdet/core/bbox/assigners/region_assigner.py"}]}
| 3,012 | 371 |
gh_patches_debug_1493
|
rasdani/github-patches
|
git_diff
|
plotly__dash-601
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extract meta failure on missing props docstring.
If a prop is missing a docstring, component generation fails with the JS error `Cannot read property 'length' of undefined`.
https://community.plot.ly/t/dash-component-creation-javascript-ok-nothing-rendered-in-python/19369
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/development/component_generator.py`
Content:
```
1 from __future__ import print_function
2 from collections import OrderedDict
3
4 import json
5 import sys
6 import subprocess
7 import shlex
8 import os
9 import argparse
10 import shutil
11 import functools
12
13 import pkg_resources
14
15 from ._r_components_generation import write_class_file
16 from ._r_components_generation import generate_exports
17 from ._py_components_generation import generate_class_file
18 from ._py_components_generation import generate_imports
19 from ._py_components_generation import generate_classes_files
20
21
22 class _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,
23 argparse.RawDescriptionHelpFormatter):
24 pass
25
26
27 # pylint: disable=too-many-locals
28 def generate_components(components_source, project_shortname,
29 package_info_filename='package.json',
30 ignore='^_',
31 rprefix=None):
32
33 project_shortname = project_shortname.replace('-', '_').rstrip('/\\')
34
35 if rprefix:
36 prefix = rprefix
37
38 is_windows = sys.platform == 'win32'
39
40 extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')
41
42 os.environ['NODE_PATH'] = 'node_modules'
43 cmd = shlex.split(
44 'node {} {} {}'.format(extract_path, ignore, components_source),
45 posix=not is_windows
46 )
47
48 shutil.copyfile('package.json',
49 os.path.join(project_shortname, package_info_filename))
50
51 proc = subprocess.Popen(cmd,
52 stdout=subprocess.PIPE,
53 stderr=subprocess.PIPE,
54 shell=is_windows)
55 out, err = proc.communicate()
56 status = proc.poll()
57
58 if err:
59 print(err.decode(), file=sys.stderr)
60
61 if not out:
62 print(
63 'Error generating metadata in {} (status={})'.format(
64 project_shortname, status),
65 file=sys.stderr)
66 sys.exit(1)
67
68 jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)
69
70 if sys.version_info[0] >= 3:
71 metadata = jsondata_unicode
72 else:
73 metadata = byteify(jsondata_unicode)
74
75 generator_methods = [generate_class_file]
76
77 if rprefix:
78 if not os.path.exists('man'):
79 os.makedirs('man')
80 if not os.path.exists('R'):
81 os.makedirs('R')
82 generator_methods.append(
83 functools.partial(write_class_file, prefix=prefix))
84
85 components = generate_classes_files(
86 project_shortname,
87 metadata,
88 *generator_methods
89 )
90
91 with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:
92 json.dump(metadata, f)
93
94 generate_imports(project_shortname, components)
95
96 if rprefix:
97 with open('package.json', 'r') as f:
98 jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)
99 if sys.version_info[0] >= 3:
100 pkg_data = jsondata_unicode
101 else:
102 pkg_data = byteify(jsondata_unicode)
103
104 generate_exports(
105 project_shortname, components, metadata, pkg_data, prefix
106 )
107
108
109 def cli():
110 parser = argparse.ArgumentParser(
111 prog='dash-generate-components',
112 formatter_class=_CombinedFormatter,
113 description='Generate dash components by extracting the metadata '
114 'using react-docgen. Then map the metadata to python classes.'
115 )
116 parser.add_argument('components_source',
117 help='React components source directory.')
118 parser.add_argument(
119 'project_shortname',
120 help='Name of the project to export the classes files.'
121 )
122 parser.add_argument(
123 '-p', '--package-info-filename',
124 default='package.json',
125 help='The filename of the copied `package.json` to `project_shortname`'
126 )
127 parser.add_argument(
128 '-i', '--ignore',
129 default='^_',
130 help='Files/directories matching the pattern will be ignored'
131 )
132 parser.add_argument(
133 '--r-prefix',
134 help='Experimental: specify a prefix for DashR component names, write'
135 'DashR components to R dir, create R package.'
136 )
137
138 args = parser.parse_args()
139 generate_components(
140 args.components_source, args.project_shortname,
141 package_info_filename=args.package_info_filename,
142 ignore=args.ignore,
143 rprefix=args.r_prefix)
144
145
146 # pylint: disable=undefined-variable
147 def byteify(input_object):
148 if isinstance(input_object, dict):
149 return OrderedDict([
150 (byteify(key), byteify(value))
151 for key, value in input_object.iteritems()
152 ])
153 elif isinstance(input_object, list):
154 return [byteify(element) for element in input_object]
155 elif isinstance(input_object, unicode): # noqa:F821
156 return input_object.encode('utf-8')
157 return input_object
158
159
160 if __name__ == '__main__':
161 cli()
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py
--- a/dash/development/component_generator.py
+++ b/dash/development/component_generator.py
@@ -89,7 +89,7 @@
)
with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:
- json.dump(metadata, f)
+ json.dump(metadata, f, indent=2)
generate_imports(project_shortname, components)
|
{"golden_diff": "diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py\n--- a/dash/development/component_generator.py\n+++ b/dash/development/component_generator.py\n@@ -89,7 +89,7 @@\n )\n \n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n- json.dump(metadata, f)\n+ json.dump(metadata, f, indent=2)\n \n generate_imports(project_shortname, components)\n", "issue": "Extract meta failure on missing props docstring.\nIf a props is missing a docstring, it will fail to generate the component with js error, `Cannot read property 'length' of undefined`.\r\n\r\nhttps://community.plot.ly/t/dash-component-creation-javascript-ok-nothing-rendered-in-python/19369\n", "before_files": [{"content": "from __future__ import print_function\nfrom collections import OrderedDict\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\nimport functools\n\nimport pkg_resources\n\nfrom ._r_components_generation import write_class_file\nfrom ._r_components_generation import generate_exports\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json',\n ignore='^_',\n rprefix=None):\n\n project_shortname = project_shortname.replace('-', '_').rstrip('/\\\\')\n\n if rprefix:\n prefix = rprefix\n\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split(\n 'node {} {} {}'.format(extract_path, ignore, components_source),\n posix=not is_windows\n )\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)\n\n if sys.version_info[0] >= 3:\n metadata = jsondata_unicode\n else:\n metadata = byteify(jsondata_unicode)\n\n generator_methods = [generate_class_file]\n\n if rprefix:\n if not os.path.exists('man'):\n os.makedirs('man')\n if not os.path.exists('R'):\n os.makedirs('R')\n generator_methods.append(\n functools.partial(write_class_file, prefix=prefix))\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n *generator_methods\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f)\n\n generate_imports(project_shortname, components)\n\n if rprefix:\n with open('package.json', 'r') as f:\n jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)\n if sys.version_info[0] >= 3:\n pkg_data = jsondata_unicode\n else:\n pkg_data = byteify(jsondata_unicode)\n\n generate_exports(\n project_shortname, components, metadata, pkg_data, prefix\n )\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate 
dash components by extracting the metadata '\n 'using react-docgen. Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n parser.add_argument(\n '-i', '--ignore',\n default='^_',\n help='Files/directories matching the pattern will be ignored'\n )\n parser.add_argument(\n '--r-prefix',\n help='Experimental: specify a prefix for DashR component names, write'\n 'DashR components to R dir, create R package.'\n )\n\n args = parser.parse_args()\n generate_components(\n args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename,\n ignore=args.ignore,\n rprefix=args.r_prefix)\n\n\n# pylint: disable=undefined-variable\ndef byteify(input_object):\n if isinstance(input_object, dict):\n return OrderedDict([\n (byteify(key), byteify(value))\n for key, value in input_object.iteritems()\n ])\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, unicode): # noqa:F821\n return input_object.encode('utf-8')\n return input_object\n\n\nif __name__ == '__main__':\n cli()\n", "path": "dash/development/component_generator.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom collections import OrderedDict\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\nimport functools\n\nimport pkg_resources\n\nfrom ._r_components_generation import write_class_file\nfrom ._r_components_generation import generate_exports\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json',\n ignore='^_',\n rprefix=None):\n\n project_shortname = project_shortname.replace('-', '_').rstrip('/\\\\')\n\n if rprefix:\n prefix = rprefix\n\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split(\n 'node {} {} {}'.format(extract_path, ignore, components_source),\n posix=not is_windows\n )\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)\n\n if sys.version_info[0] >= 3:\n metadata = jsondata_unicode\n else:\n metadata = byteify(jsondata_unicode)\n\n generator_methods = [generate_class_file]\n\n if rprefix:\n if not os.path.exists('man'):\n os.makedirs('man')\n if not os.path.exists('R'):\n os.makedirs('R')\n generator_methods.append(\n 
functools.partial(write_class_file, prefix=prefix))\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n *generator_methods\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f, indent=2)\n\n generate_imports(project_shortname, components)\n\n if rprefix:\n with open('package.json', 'r') as f:\n jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)\n if sys.version_info[0] >= 3:\n pkg_data = jsondata_unicode\n else:\n pkg_data = byteify(jsondata_unicode)\n\n generate_exports(\n project_shortname, components, metadata, pkg_data, prefix\n )\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate dash components by extracting the metadata '\n 'using react-docgen. Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n parser.add_argument(\n '-i', '--ignore',\n default='^_',\n help='Files/directories matching the pattern will be ignored'\n )\n parser.add_argument(\n '--r-prefix',\n help='Experimental: specify a prefix for DashR component names, write'\n 'DashR components to R dir, create R package.'\n )\n\n args = parser.parse_args()\n generate_components(\n args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename,\n ignore=args.ignore,\n rprefix=args.r_prefix)\n\n\n# pylint: disable=undefined-variable\ndef byteify(input_object):\n if isinstance(input_object, dict):\n return OrderedDict([\n (byteify(key), byteify(value))\n for key, value in input_object.iteritems()\n ])\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, unicode): # noqa:F821\n return input_object.encode('utf-8')\n return input_object\n\n\nif __name__ == '__main__':\n cli()\n", "path": "dash/development/component_generator.py"}]}
| 1,715 | 106 |
gh_patches_debug_55329
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-7594
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support the use of SunPyBaseCoordinateFrame outside of core library
### Describe the feature
* It would be useful to be able to use `SunPyBaseCoordinateFrame` outside of sunpy, but currently doing so is awkward as the `frame_to_wcs` and `wcs_to_frame` mappings are hardcoded to the current frames / WCSes and raise an error if you try to do so
* The name clearly implies this is SunPy specific, but there is a lot of boilerplate code that would have to be copied to provide the same thing.
At the moment, to make this work you have to make sure whatever frames you add are registered before the sunpy function, e.g.
`astropy.wcs.utils.FRAME_WCS_MAPPINGS.insert(1, [my_wcs_to_frame])`
### Proposed solution
Don't raise an error if the frame is a subclass of `SunPyBaseCoordinateFrame` but not one of the frames implemented in sunpy; instead return `None`
Specifically, L198 raises an error, as `xcoord` and `ycoord` are undefined
https://github.com/sunpy/sunpy/blob/2281a2198997e8671efd48dcb531b07a98b86ddf/sunpy/coordinates/wcs_utils.py#L173-L198
A simple solution would be to add a final `else` clause to this if/elif block:
```python
elif isinstance(frame, HeliographicStonyhurst):
    xcoord = 'HGLN' + '-' + projection
    ycoord = 'HGLT' + '-' + projection
    wcs.wcs.cunit = ['deg', 'deg']
else:
    return None
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/coordinates/wcs_utils.py`
Content:
```
1
2 import astropy.units as u
3 import astropy.wcs.utils
4 from astropy.coordinates import BaseCoordinateFrame, SkyCoord
5 from astropy.wcs import WCS
6 from astropy.wcs.utils import obsgeo_to_frame
7
8 from sunpy import log
9 from .frames import (
10 Heliocentric,
11 HeliographicCarrington,
12 HeliographicStonyhurst,
13 Helioprojective,
14 SunPyBaseCoordinateFrame,
15 )
16
17 __all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']
18
19
20 def solar_wcs_frame_mapping(wcs):
21 """
22 This function registers the coordinates frames to their FITS-WCS coordinate
23 type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.
24
25 Parameters
26 ----------
27 wcs : astropy.wcs.WCS
28
29 Returns
30 -------
31 astropy.coordinates.BaseCoordinateFrame
32 """
33
34 if hasattr(wcs, "coordinate_frame"):
35 return wcs.coordinate_frame
36
37 dateobs = wcs.wcs.dateavg or wcs.wcs.dateobs or None
38
39 # Get observer coordinate from the WCS auxiliary information
40 # Note: the order of the entries is important, as it determines which set
41 # of header keys is given priority below. Stonyhurst should usually be
42 # prioritized, as it is defined more consistently across implementations,
43 # and so it should occur before Carrington here.
44 required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
45 HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}
46
47 # Get rsun from the WCS auxiliary information
48 rsun = wcs.wcs.aux.rsun_ref
49 if rsun is not None:
50 rsun *= u.m
51
52 # TODO: remove these errors in sunpy 4.1
53 bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
54 if hasattr(wcs, attr)]
55 if len(bad_attrs):
56 raise ValueError(f"The {' and '.join(bad_attrs)} attribute(s) on a WCS "
57 "are no longer supported.")
58
59 observer = None
60 for frame, attr_names in required_attrs.items():
61 attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
62 if all([attr is not None for attr in attrs]):
63 kwargs = {'obstime': dateobs}
64 if rsun is not None:
65 kwargs['rsun'] = rsun
66 if issubclass(frame, HeliographicCarrington):
67 kwargs['observer'] = 'self'
68
69 observer = frame(attrs[0] * u.deg,
70 attrs[1] * u.deg,
71 attrs[2] * u.m,
72 **kwargs)
73 break
74
75 # Read the observer out of obsgeo for ground based observers
76 if observer is None:
77 try:
78 observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
79 observer = SkyCoord(observer, rsun=rsun)
80 except ValueError as e:
81 # The helper function assumes you know the obsgeo coords you are
82 # parsing are good, we are not sure, so catch the error.
83
84 # This approach could lead to an invalid observer (i.e. one of the
85 # coords being NaN), but only if the WCS has been constructed like that.
86 log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")
87
88 # Collect all of the possible frame attributes, although some may be removed later
89 frame_args = {'obstime': dateobs}
90 if observer is not None:
91 frame_args['observer'] = observer
92 if rsun is not None:
93 frame_args['rsun'] = rsun
94
95 frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)
96
97 if frame_class:
98 if frame_class == HeliographicStonyhurst:
99 frame_args.pop('observer', None)
100 if frame_class == Heliocentric:
101 frame_args.pop('rsun', None)
102
103 return frame_class(**frame_args)
104
105
106 def _sunpy_frame_class_from_ctypes(ctypes):
107 # Truncate the ctype to the first four letters
108 ctypes = {c[:4] for c in ctypes}
109
110 mapping = {
111 Helioprojective: {'HPLN', 'HPLT'},
112 HeliographicStonyhurst: {'HGLN', 'HGLT'},
113 HeliographicCarrington: {'CRLN', 'CRLT'},
114 Heliocentric: {'SOLX', 'SOLY'},
115 }
116
117 for frame_class, ctype_pair in mapping.items():
118 if ctype_pair <= ctypes:
119 return frame_class
120
121
122 def _set_wcs_aux_obs_coord(wcs, obs_frame):
123 """
124 Set (in-place) observer coordinate information on a WCS.
125
126 Parameters
127 ----------
128 wcs : astropy.wcs.WCS
129 obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame
130 """
131 # Sometimes obs_coord can be a SkyCoord, so convert down to a frame
132 if hasattr(obs_frame, 'frame'):
133 obs_frame = obs_frame.frame
134
135 if isinstance(obs_frame, HeliographicStonyhurst):
136 wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)
137 elif isinstance(obs_frame, HeliographicCarrington):
138 wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)
139 else:
140 raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')
141 # These two keywords are the same for Carrington and Stonyhurst
142 wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)
143 wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)
144
145
146 def solar_frame_to_wcs_mapping(frame, projection='TAN'):
147 """
148 For a given frame, this function returns the corresponding WCS object.
149 It registers the WCS coordinates types from their associated frame in the
150 `astropy.wcs.utils.celestial_frame_to_wcs` registry.
151
152 Parameters
153 ----------
154 frame : astropy.coordinates.BaseCoordinateFrame
155 projection : str, optional
156
157 Returns
158 -------
159 astropy.wcs.WCS
160 """
161 wcs = WCS(naxis=2)
162
163 if hasattr(frame, 'rsun'):
164 wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)
165
166 if hasattr(frame, 'observer') and frame.observer is not None:
167 if isinstance(frame.observer, BaseCoordinateFrame):
168 observer = frame.observer
169 elif frame.observer == 'self':
170 observer = frame
171 _set_wcs_aux_obs_coord(wcs, observer)
172
173 if isinstance(frame, SunPyBaseCoordinateFrame):
174
175 if frame.obstime:
176 wcs.wcs.dateobs = frame.obstime.utc.isot
177
178 if isinstance(frame, Helioprojective):
179 xcoord = 'HPLN' + '-' + projection
180 ycoord = 'HPLT' + '-' + projection
181 wcs.wcs.cunit = ['arcsec', 'arcsec']
182 elif isinstance(frame, Heliocentric):
183 xcoord = 'SOLX'
184 ycoord = 'SOLY'
185 wcs.wcs.cunit = ['deg', 'deg']
186 elif isinstance(frame, HeliographicCarrington):
187 xcoord = 'CRLN' + '-' + projection
188 ycoord = 'CRLT' + '-' + projection
189 wcs.wcs.cunit = ['deg', 'deg']
190 elif isinstance(frame, HeliographicStonyhurst):
191 xcoord = 'HGLN' + '-' + projection
192 ycoord = 'HGLT' + '-' + projection
193 wcs.wcs.cunit = ['deg', 'deg']
194
195 else:
196 return None
197
198 wcs.wcs.ctype = [xcoord, ycoord]
199
200 return wcs
201
202
203 astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
204 astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/coordinates/wcs_utils.py b/sunpy/coordinates/wcs_utils.py
--- a/sunpy/coordinates/wcs_utils.py
+++ b/sunpy/coordinates/wcs_utils.py
@@ -191,6 +191,9 @@
xcoord = 'HGLN' + '-' + projection
ycoord = 'HGLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
+ else:
+ # A subclass not supported by the core library
+ return None
else:
return None
|
{"golden_diff": "diff --git a/sunpy/coordinates/wcs_utils.py b/sunpy/coordinates/wcs_utils.py\n--- a/sunpy/coordinates/wcs_utils.py\n+++ b/sunpy/coordinates/wcs_utils.py\n@@ -191,6 +191,9 @@\n xcoord = 'HGLN' + '-' + projection\n ycoord = 'HGLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n+ else:\n+ # A subclass not supported by the core library\n+ return None\n \n else:\n return None\n", "issue": "Support the use of SunPyBaseCoordinateFrame outside of core library\n### Describe the feature\r\n\r\n* It would be useful to be able to use `SunPyBaseCoordinateFrame` outside of sunpy but currently doing so is alkward as the `frame_to_wcs` and `wcs_to_frame` mappings are hardcode to the current frames / wcs' and raise an error you you try to do so\r\n* The name clearly implies this is SunPy specific but there a lot of boiler plate code that would have to be copied to provide the same thing.\r\n\r\nAt the moment to make this work you have to make sure what ever frames are added are added before the sunpy function e.g.\r\n\r\n`astropy.wcs.utils.FRAME_WCS_MAPPINGS.insert(1, [my_wcs_to_frame])`\r\n\r\n### Proposed solution\r\n\r\nDon't raise an error the if the frame is a subclass of `SunPyBaseCoordinateFrame` but not one of the frame implemented in sunpy return `None` \r\n\r\nSpecifically L198 raise an error as xcoord and xcoord are undefined\r\nhttps://github.com/sunpy/sunpy/blob/2281a2198997e8671efd48dcb531b07a98b86ddf/sunpy/coordinates/wcs_utils.py#L173-L198\r\n\r\nA simple solution would be to an a final else clause to this if Elias\r\n```python\r\n elif isinstance(frame, HeliographicStonyhurst):\r\n xcoord = 'HGLN' + '-' + projection\r\n ycoord = 'HGLT' + '-' + projection\r\n wcs.wcs.cunit = ['deg', 'deg']\r\n else\r\n return None\r\n\r\n```\n", "before_files": [{"content": "\nimport astropy.units as u\nimport astropy.wcs.utils\nfrom astropy.coordinates import BaseCoordinateFrame, SkyCoord\nfrom astropy.wcs import WCS\nfrom astropy.wcs.utils import obsgeo_to_frame\n\nfrom sunpy import log\nfrom .frames import (\n Heliocentric,\n HeliographicCarrington,\n HeliographicStonyhurst,\n Helioprojective,\n SunPyBaseCoordinateFrame,\n)\n\n__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']\n\n\ndef solar_wcs_frame_mapping(wcs):\n \"\"\"\n This function registers the coordinates frames to their FITS-WCS coordinate\n type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n\n Returns\n -------\n astropy.coordinates.BaseCoordinateFrame\n \"\"\"\n\n if hasattr(wcs, \"coordinate_frame\"):\n return wcs.coordinate_frame\n\n dateobs = wcs.wcs.dateavg or wcs.wcs.dateobs or None\n\n # Get observer coordinate from the WCS auxiliary information\n # Note: the order of the entries is important, as it determines which set\n # of header keys is given priority below. 
Stonyhurst should usually be\n # prioritized, as it is defined more consistently across implementations,\n # and so it should occur before Carrington here.\n required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],\n HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}\n\n # Get rsun from the WCS auxiliary information\n rsun = wcs.wcs.aux.rsun_ref\n if rsun is not None:\n rsun *= u.m\n\n # TODO: remove these errors in sunpy 4.1\n bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']\n if hasattr(wcs, attr)]\n if len(bad_attrs):\n raise ValueError(f\"The {' and '.join(bad_attrs)} attribute(s) on a WCS \"\n \"are no longer supported.\")\n\n observer = None\n for frame, attr_names in required_attrs.items():\n attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]\n if all([attr is not None for attr in attrs]):\n kwargs = {'obstime': dateobs}\n if rsun is not None:\n kwargs['rsun'] = rsun\n if issubclass(frame, HeliographicCarrington):\n kwargs['observer'] = 'self'\n\n observer = frame(attrs[0] * u.deg,\n attrs[1] * u.deg,\n attrs[2] * u.m,\n **kwargs)\n break\n\n # Read the observer out of obsgeo for ground based observers\n if observer is None:\n try:\n observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)\n observer = SkyCoord(observer, rsun=rsun)\n except ValueError as e:\n # The helper function assumes you know the obsgeo coords you are\n # parsing are good, we are not sure, so catch the error.\n\n # This approach could lead to an invalid observer (i.e. one of the\n # coords being NaN), but only if the WCS has been constructed like that.\n log.debug(f\"Could not parse obsgeo coordinates from WCS:\\n{e}\")\n\n # Collect all of the possible frame attributes, although some may be removed later\n frame_args = {'obstime': dateobs}\n if observer is not None:\n frame_args['observer'] = observer\n if rsun is not None:\n frame_args['rsun'] = rsun\n\n frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)\n\n if frame_class:\n if frame_class == HeliographicStonyhurst:\n frame_args.pop('observer', None)\n if frame_class == Heliocentric:\n frame_args.pop('rsun', None)\n\n return frame_class(**frame_args)\n\n\ndef _sunpy_frame_class_from_ctypes(ctypes):\n # Truncate the ctype to the first four letters\n ctypes = {c[:4] for c in ctypes}\n\n mapping = {\n Helioprojective: {'HPLN', 'HPLT'},\n HeliographicStonyhurst: {'HGLN', 'HGLT'},\n HeliographicCarrington: {'CRLN', 'CRLT'},\n Heliocentric: {'SOLX', 'SOLY'},\n }\n\n for frame_class, ctype_pair in mapping.items():\n if ctype_pair <= ctypes:\n return frame_class\n\n\ndef _set_wcs_aux_obs_coord(wcs, obs_frame):\n \"\"\"\n Set (in-place) observer coordinate information on a WCS.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame\n \"\"\"\n # Sometimes obs_coord can be a SkyCoord, so convert down to a frame\n if hasattr(obs_frame, 'frame'):\n obs_frame = obs_frame.frame\n\n if isinstance(obs_frame, HeliographicStonyhurst):\n wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)\n elif isinstance(obs_frame, HeliographicCarrington):\n wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)\n else:\n raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')\n # These two keywords are the same for Carrington and Stonyhurst\n wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)\n wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)\n\n\ndef solar_frame_to_wcs_mapping(frame, projection='TAN'):\n \"\"\"\n 
For a given frame, this function returns the corresponding WCS object.\n It registers the WCS coordinates types from their associated frame in the\n `astropy.wcs.utils.celestial_frame_to_wcs` registry.\n\n Parameters\n ----------\n frame : astropy.coordinates.BaseCoordinateFrame\n projection : str, optional\n\n Returns\n -------\n astropy.wcs.WCS\n \"\"\"\n wcs = WCS(naxis=2)\n\n if hasattr(frame, 'rsun'):\n wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)\n\n if hasattr(frame, 'observer') and frame.observer is not None:\n if isinstance(frame.observer, BaseCoordinateFrame):\n observer = frame.observer\n elif frame.observer == 'self':\n observer = frame\n _set_wcs_aux_obs_coord(wcs, observer)\n\n if isinstance(frame, SunPyBaseCoordinateFrame):\n\n if frame.obstime:\n wcs.wcs.dateobs = frame.obstime.utc.isot\n\n if isinstance(frame, Helioprojective):\n xcoord = 'HPLN' + '-' + projection\n ycoord = 'HPLT' + '-' + projection\n wcs.wcs.cunit = ['arcsec', 'arcsec']\n elif isinstance(frame, Heliocentric):\n xcoord = 'SOLX'\n ycoord = 'SOLY'\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicCarrington):\n xcoord = 'CRLN' + '-' + projection\n ycoord = 'CRLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicStonyhurst):\n xcoord = 'HGLN' + '-' + projection\n ycoord = 'HGLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n\n else:\n return None\n\n wcs.wcs.ctype = [xcoord, ycoord]\n\n return wcs\n\n\nastropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])\nastropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])\n", "path": "sunpy/coordinates/wcs_utils.py"}], "after_files": [{"content": "\nimport astropy.units as u\nimport astropy.wcs.utils\nfrom astropy.coordinates import BaseCoordinateFrame, SkyCoord\nfrom astropy.wcs import WCS\nfrom astropy.wcs.utils import obsgeo_to_frame\n\nfrom sunpy import log\nfrom .frames import (\n Heliocentric,\n HeliographicCarrington,\n HeliographicStonyhurst,\n Helioprojective,\n SunPyBaseCoordinateFrame,\n)\n\n__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']\n\n\ndef solar_wcs_frame_mapping(wcs):\n \"\"\"\n This function registers the coordinates frames to their FITS-WCS coordinate\n type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n\n Returns\n -------\n astropy.coordinates.BaseCoordinateFrame\n \"\"\"\n\n if hasattr(wcs, \"coordinate_frame\"):\n return wcs.coordinate_frame\n\n dateobs = wcs.wcs.dateavg or wcs.wcs.dateobs or None\n\n # Get observer coordinate from the WCS auxiliary information\n # Note: the order of the entries is important, as it determines which set\n # of header keys is given priority below. 
Stonyhurst should usually be\n # prioritized, as it is defined more consistently across implementations,\n # and so it should occur before Carrington here.\n required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],\n HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}\n\n # Get rsun from the WCS auxiliary information\n rsun = wcs.wcs.aux.rsun_ref\n if rsun is not None:\n rsun *= u.m\n\n # TODO: remove these errors in sunpy 4.1\n bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']\n if hasattr(wcs, attr)]\n if len(bad_attrs):\n raise ValueError(f\"The {' and '.join(bad_attrs)} attribute(s) on a WCS \"\n \"are no longer supported.\")\n\n observer = None\n for frame, attr_names in required_attrs.items():\n attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]\n if all([attr is not None for attr in attrs]):\n kwargs = {'obstime': dateobs}\n if rsun is not None:\n kwargs['rsun'] = rsun\n if issubclass(frame, HeliographicCarrington):\n kwargs['observer'] = 'self'\n\n observer = frame(attrs[0] * u.deg,\n attrs[1] * u.deg,\n attrs[2] * u.m,\n **kwargs)\n break\n\n # Read the observer out of obsgeo for ground based observers\n if observer is None:\n try:\n observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)\n observer = SkyCoord(observer, rsun=rsun)\n except ValueError as e:\n # The helper function assumes you know the obsgeo coords you are\n # parsing are good, we are not sure, so catch the error.\n\n # This approach could lead to an invalid observer (i.e. one of the\n # coords being NaN), but only if the WCS has been constructed like that.\n log.debug(f\"Could not parse obsgeo coordinates from WCS:\\n{e}\")\n\n # Collect all of the possible frame attributes, although some may be removed later\n frame_args = {'obstime': dateobs}\n if observer is not None:\n frame_args['observer'] = observer\n if rsun is not None:\n frame_args['rsun'] = rsun\n\n frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)\n\n if frame_class:\n if frame_class == HeliographicStonyhurst:\n frame_args.pop('observer', None)\n if frame_class == Heliocentric:\n frame_args.pop('rsun', None)\n\n return frame_class(**frame_args)\n\n\ndef _sunpy_frame_class_from_ctypes(ctypes):\n # Truncate the ctype to the first four letters\n ctypes = {c[:4] for c in ctypes}\n\n mapping = {\n Helioprojective: {'HPLN', 'HPLT'},\n HeliographicStonyhurst: {'HGLN', 'HGLT'},\n HeliographicCarrington: {'CRLN', 'CRLT'},\n Heliocentric: {'SOLX', 'SOLY'},\n }\n\n for frame_class, ctype_pair in mapping.items():\n if ctype_pair <= ctypes:\n return frame_class\n\n\ndef _set_wcs_aux_obs_coord(wcs, obs_frame):\n \"\"\"\n Set (in-place) observer coordinate information on a WCS.\n\n Parameters\n ----------\n wcs : astropy.wcs.WCS\n obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame\n \"\"\"\n # Sometimes obs_coord can be a SkyCoord, so convert down to a frame\n if hasattr(obs_frame, 'frame'):\n obs_frame = obs_frame.frame\n\n if isinstance(obs_frame, HeliographicStonyhurst):\n wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)\n elif isinstance(obs_frame, HeliographicCarrington):\n wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)\n else:\n raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')\n # These two keywords are the same for Carrington and Stonyhurst\n wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)\n wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)\n\n\ndef solar_frame_to_wcs_mapping(frame, projection='TAN'):\n \"\"\"\n 
For a given frame, this function returns the corresponding WCS object.\n It registers the WCS coordinates types from their associated frame in the\n `astropy.wcs.utils.celestial_frame_to_wcs` registry.\n\n Parameters\n ----------\n frame : astropy.coordinates.BaseCoordinateFrame\n projection : str, optional\n\n Returns\n -------\n astropy.wcs.WCS\n \"\"\"\n wcs = WCS(naxis=2)\n\n if hasattr(frame, 'rsun'):\n wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)\n\n if hasattr(frame, 'observer') and frame.observer is not None:\n if isinstance(frame.observer, BaseCoordinateFrame):\n observer = frame.observer\n elif frame.observer == 'self':\n observer = frame\n _set_wcs_aux_obs_coord(wcs, observer)\n\n if isinstance(frame, SunPyBaseCoordinateFrame):\n\n if frame.obstime:\n wcs.wcs.dateobs = frame.obstime.utc.isot\n\n if isinstance(frame, Helioprojective):\n xcoord = 'HPLN' + '-' + projection\n ycoord = 'HPLT' + '-' + projection\n wcs.wcs.cunit = ['arcsec', 'arcsec']\n elif isinstance(frame, Heliocentric):\n xcoord = 'SOLX'\n ycoord = 'SOLY'\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicCarrington):\n xcoord = 'CRLN' + '-' + projection\n ycoord = 'CRLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n elif isinstance(frame, HeliographicStonyhurst):\n xcoord = 'HGLN' + '-' + projection\n ycoord = 'HGLT' + '-' + projection\n wcs.wcs.cunit = ['deg', 'deg']\n else:\n # A subclass not supported by the core library\n return None\n\n else:\n return None\n\n wcs.wcs.ctype = [xcoord, ycoord]\n\n return wcs\n\n\nastropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])\nastropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])\n", "path": "sunpy/coordinates/wcs_utils.py"}]}
| 2,927 | 131 |
gh_patches_debug_25415
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-2071
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CT failing since at least 2018-01-07
CT has been failing since 2018-01-07
Based on automated runs it appears that CT has not run successfully in 2 days (2018-01-07).
```
23:00:35 INFO scrapelib: GET - ftp://ftp.cga.ct.gov
23:00:35 CRITICAL pupa: Session(s) 2018 were reported by Connecticut.get_session_list() but were not found in Connecticut.legislative_sessions or Connecticut.ignored_scraped_sessions.
no pupa_settings on path, using defaults
ct (scrape, import)
events: {}
bills: {}
people: {}
```
Visit http://bobsled.openstates.org for more info.
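
For context, the CRITICAL line above comes from a session-accounting check that is, in essence, a set comparison; a simplified sketch (not the actual pupa source) of what it verifies against the attributes defined in `openstates/ct/__init__.py`:

```python
# Simplified sketch of pupa's session check -- not the real implementation.
juris = Connecticut()
scraped = set(juris.get_session_list())                      # now includes "2018"
known = {s["_scraped_name"] for s in juris.legislative_sessions}
ignored = set(juris.ignored_scraped_sessions)

unaccounted = scraped - known - ignored
if unaccounted:
    raise Exception(
        "Session(s) {} were reported by Connecticut.get_session_list() but were not "
        "found in Connecticut.legislative_sessions or "
        "Connecticut.ignored_scraped_sessions.".format(", ".join(sorted(unaccounted)))
    )
```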
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ct/__init__.py`
Content:
```
1 import lxml.html
2
3 from pupa.scrape import Jurisdiction, Organization
4 from .people import CTPersomScraper
5 from .bills import CTBillScraper
6 from .events import CTEventScraper
7
8 settings = {
9 'SCRAPELIB_RPM': 20
10 }
11
12
13 class Connecticut(Jurisdiction):
14 division_id = "ocd-division/country:us/state:ct"
15 classification = "government"
16 name = "Connecticut"
17 url = "http://www.cga.ct.gov/"
18 scrapers = {
19 'people': CTPersomScraper,
20 'bills': CTBillScraper,
21 'events': CTEventScraper,
22 }
23 parties = [
24 {'name': 'Republican'},
25 {'name': 'Democratic'}
26 ]
27 legislative_sessions = [
28 {
29 "_scraped_name": "2011",
30 "identifier": "2011",
31 "name": "2011 Regular Session"
32 },
33 {
34 "_scraped_name": "2012",
35 "identifier": "2012",
36 "name": "2012 Regular Session"
37 },
38 {
39 "_scraped_name": "2013",
40 "identifier": "2013",
41 "name": "2013 Regular Session"
42 },
43 {
44 "_scraped_name": "2014",
45 "identifier": "2014",
46 "name": "2014 Regular Session"
47 },
48 {
49 "_scraped_name": "2015",
50 "identifier": "2015",
51 "name": "2015 Regular Session"
52 },
53 {
54 "_scraped_name": "2016",
55 "end_date": "2016-05-04",
56 "identifier": "2016",
57 "name": "2016 Regular Session",
58 "start_date": "2016-02-03"
59 },
60 {
61 "_scraped_name": "2017",
62 "identifier": "2017",
63 "name": "2017 Regular Session",
64 "start_date": "2017-01-04",
65 "end_date": "2017-06-07",
66 },
67 ]
68 ignored_scraped_sessions = [
69 "2010",
70 "2009",
71 "2008",
72 "2007",
73 "2006",
74 "2005"
75 ]
76
77 def get_organizations(self):
78 legislature_name = "Connecticut General Assembly"
79 lower_chamber_name = "House"
80 lower_seats = 151
81 lower_title = "Representative"
82 upper_chamber_name = "Senate"
83 upper_seats = 36
84 upper_title = "Senator"
85
86 legislature = Organization(name=legislature_name,
87 classification="legislature")
88 upper = Organization(upper_chamber_name, classification='upper',
89 parent_id=legislature._id)
90 lower = Organization(lower_chamber_name, classification='lower',
91 parent_id=legislature._id)
92
93 for n in range(1, upper_seats+1):
94 upper.add_post(
95 label=str(n), role=upper_title,
96 division_id='{}/sldu:{}'.format(self.division_id, n))
97 for n in range(1, lower_seats+1):
98 lower.add_post(
99 label=str(n), role=lower_title,
100 division_id='{}/sldl:{}'.format(self.division_id, n))
101
102 yield legislature
103 yield upper
104 yield lower
105
106 def get_session_list(self):
107 import scrapelib
108 text = scrapelib.Scraper().get('ftp://ftp.cga.ct.gov').text
109 sessions = [line.split()[-1] for line in text.splitlines()]
110
111 for not_session_name in ('incoming', 'pub', 'CGAAudio', 'rba', 'NCSL', "apaac",
112 'FOI_1', 'stainedglass', ):
113 sessions.remove(not_session_name)
114 return sessions
115
116 def get_extract_text(self, doc, data):
117 doc = lxml.html.fromstring(data)
118 text = ' '.join(p.text_content() for p in doc.xpath('//body/p'))
119 return text
120
```
Path: `billy_metadata/ct.py`
Content:
```
1 import datetime
2
3
4 metadata = {
5 'name': 'Connecticut',
6 'abbreviation': 'ct',
7 'legislature_name': 'Connecticut General Assembly',
8 'legislature_url': 'http://www.cga.ct.gov/',
9 'capitol_timezone': 'America/New_York',
10 'chambers': {
11 'upper': {'name': 'Senate', 'title': 'Senator'},
12 'lower': {'name': 'House', 'title': 'Representative'},
13 },
14 'terms': [
15 {
16 'name': '2011-2012',
17 'start_year': 2011,
18 'end_year': 2012,
19 'sessions': ['2011', '2012'],
20 },
21 {
22 'name': '2013-2014',
23 'start_year': 2013,
24 'end_year': 2014,
25 'sessions': ['2013', '2014'],
26 },
27 {
28 'name': '2015-2016',
29 'start_year': 2015,
30 'end_year': 2016,
31 'sessions': ['2015', '2016'],
32 },
33 {
34 'name': '2017-2018',
35 'start_year': 2017,
36 'end_year': 2018,
37 'sessions': ['2017'],
38 },
39 ],
40 'session_details': {
41 '2011': {
42 'display_name': '2011 Regular Session',
43 '_scraped_name': '2011',
44 },
45 '2012': {
46 'display_name': '2012 Regular Session',
47 '_scraped_name': '2012',
48 },
49 '2013': {
50 'display_name': '2013 Regular Session',
51 '_scraped_name': '2013',
52 },
53 '2014': {
54 'display_name': '2014 Regular Session',
55 '_scraped_name': '2014',
56 },
57 '2015': {
58 'display_name': '2015 Regular Session',
59 '_scraped_name': '2015',
60 },
61 '2016': {
62 'display_name': '2016 Regular Session',
63 'start_date': datetime.date(2016, 2, 3),
64 'end_date': datetime.date(2016, 5, 4),
65 '_scraped_name': '2016',
66 },
67 '2017': {
68 'display_name': '2017 Regular Session',
69 '_scraped_name': '2017',
70 },
71 },
72 'feature_flags': ['subjects', 'events', 'influenceexplorer'],
73 '_ignored_scraped_sessions': [
74 '2010',
75 '2009',
76 '2008',
77 '2007',
78 '2006',
79 '2005',
80 ],
81 }
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/billy_metadata/ct.py b/billy_metadata/ct.py
--- a/billy_metadata/ct.py
+++ b/billy_metadata/ct.py
@@ -34,7 +34,7 @@
'name': '2017-2018',
'start_year': 2017,
'end_year': 2018,
- 'sessions': ['2017'],
+ 'sessions': ['2017', '2018'],
},
],
'session_details': {
@@ -68,6 +68,12 @@
'display_name': '2017 Regular Session',
'_scraped_name': '2017',
},
+ '2018': {
+ 'display_name': '2018 Regular Session',
+ 'start_date': datetime.date(2018, 1, 10),
+ 'end_date': datetime.date(2018, 5, 9),
+ '_scraped_name': '2018',
+ },
},
'feature_flags': ['subjects', 'events', 'influenceexplorer'],
'_ignored_scraped_sessions': [
diff --git a/openstates/ct/__init__.py b/openstates/ct/__init__.py
--- a/openstates/ct/__init__.py
+++ b/openstates/ct/__init__.py
@@ -64,6 +64,13 @@
"start_date": "2017-01-04",
"end_date": "2017-06-07",
},
+ {
+ "_scraped_name": "2018",
+ "identifier": "2018",
+ "name": "2018 Regular Session",
+ "start_date": "2018-01-10",
+ "end_date": "2018-05-09",
+ },
]
ignored_scraped_sessions = [
"2010",
|
{"golden_diff": "diff --git a/billy_metadata/ct.py b/billy_metadata/ct.py\n--- a/billy_metadata/ct.py\n+++ b/billy_metadata/ct.py\n@@ -34,7 +34,7 @@\n 'name': '2017-2018',\n 'start_year': 2017,\n 'end_year': 2018,\n- 'sessions': ['2017'],\n+ 'sessions': ['2017', '2018'],\n },\n ],\n 'session_details': {\n@@ -68,6 +68,12 @@\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n+ '2018': {\n+ 'display_name': '2018 Regular Session',\n+ 'start_date': datetime.date(2018, 1, 10),\n+ 'end_date': datetime.date(2018, 5, 9),\n+ '_scraped_name': '2018',\n+ },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\ndiff --git a/openstates/ct/__init__.py b/openstates/ct/__init__.py\n--- a/openstates/ct/__init__.py\n+++ b/openstates/ct/__init__.py\n@@ -64,6 +64,13 @@\n \"start_date\": \"2017-01-04\",\n \"end_date\": \"2017-06-07\",\n },\n+ {\n+ \"_scraped_name\": \"2018\",\n+ \"identifier\": \"2018\",\n+ \"name\": \"2018 Regular Session\",\n+ \"start_date\": \"2018-01-10\",\n+ \"end_date\": \"2018-05-09\",\n+ },\n ]\n ignored_scraped_sessions = [\n \"2010\",\n", "issue": "CT failing since at least 2018-01-07\nCT has been failing since 2018-01-07\n\nBased on automated runs it appears that CT has not run successfully in 2 days (2018-01-07).\n\n\n```\n 23:00:35 INFO scrapelib: GET - ftp://ftp.cga.ct.gov\n23:00:35 CRITICAL pupa: Session(s) 2018 were reported by Connecticut.get_session_list() but were not found in Connecticut.legislative_sessions or Connecticut.ignored_scraped_sessions.\nno pupa_settings on path, using defaults\nct (scrape, import)\n events: {}\n bills: {}\n people: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import lxml.html\n\nfrom pupa.scrape import Jurisdiction, Organization\nfrom .people import CTPersomScraper\nfrom .bills import CTBillScraper\nfrom .events import CTEventScraper\n\nsettings = {\n 'SCRAPELIB_RPM': 20\n}\n\n\nclass Connecticut(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ct\"\n classification = \"government\"\n name = \"Connecticut\"\n url = \"http://www.cga.ct.gov/\"\n scrapers = {\n 'people': CTPersomScraper,\n 'bills': CTBillScraper,\n 'events': CTEventScraper,\n }\n parties = [\n {'name': 'Republican'},\n {'name': 'Democratic'}\n ]\n legislative_sessions = [\n {\n \"_scraped_name\": \"2011\",\n \"identifier\": \"2011\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012\",\n \"identifier\": \"2012\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013\",\n \"identifier\": \"2013\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"2014\",\n \"identifier\": \"2014\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2015\",\n \"identifier\": \"2015\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"2016\",\n \"end_date\": \"2016-05-04\",\n \"identifier\": \"2016\",\n \"name\": \"2016 Regular Session\",\n \"start_date\": \"2016-02-03\"\n },\n {\n \"_scraped_name\": \"2017\",\n \"identifier\": \"2017\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-04\",\n \"end_date\": \"2017-06-07\",\n },\n ]\n ignored_scraped_sessions = [\n \"2010\",\n \"2009\",\n \"2008\",\n \"2007\",\n \"2006\",\n \"2005\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Connecticut General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 151\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n 
upper_seats = 36\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import scrapelib\n text = scrapelib.Scraper().get('ftp://ftp.cga.ct.gov').text\n sessions = [line.split()[-1] for line in text.splitlines()]\n\n for not_session_name in ('incoming', 'pub', 'CGAAudio', 'rba', 'NCSL', \"apaac\",\n 'FOI_1', 'stainedglass', ):\n sessions.remove(not_session_name)\n return sessions\n\n def get_extract_text(self, doc, data):\n doc = lxml.html.fromstring(data)\n text = ' '.join(p.text_content() for p in doc.xpath('//body/p'))\n return text\n", "path": "openstates/ct/__init__.py"}, {"content": "import datetime\n\n\nmetadata = {\n 'name': 'Connecticut',\n 'abbreviation': 'ct',\n 'legislature_name': 'Connecticut General Assembly',\n 'legislature_url': 'http://www.cga.ct.gov/',\n 'capitol_timezone': 'America/New_York',\n 'chambers': {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n 'terms': [\n {\n 'name': '2011-2012',\n 'start_year': 2011,\n 'end_year': 2012,\n 'sessions': ['2011', '2012'],\n },\n {\n 'name': '2013-2014',\n 'start_year': 2013,\n 'end_year': 2014,\n 'sessions': ['2013', '2014'],\n },\n {\n 'name': '2015-2016',\n 'start_year': 2015,\n 'end_year': 2016,\n 'sessions': ['2015', '2016'],\n },\n {\n 'name': '2017-2018',\n 'start_year': 2017,\n 'end_year': 2018,\n 'sessions': ['2017'],\n },\n ],\n 'session_details': {\n '2011': {\n 'display_name': '2011 Regular Session',\n '_scraped_name': '2011',\n },\n '2012': {\n 'display_name': '2012 Regular Session',\n '_scraped_name': '2012',\n },\n '2013': {\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013',\n },\n '2014': {\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014',\n },\n '2015': {\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015',\n },\n '2016': {\n 'display_name': '2016 Regular Session',\n 'start_date': datetime.date(2016, 2, 3),\n 'end_date': datetime.date(2016, 5, 4),\n '_scraped_name': '2016',\n },\n '2017': {\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\n '2010',\n '2009',\n '2008',\n '2007',\n '2006',\n '2005',\n ],\n}\n", "path": "billy_metadata/ct.py"}], "after_files": [{"content": "import lxml.html\n\nfrom pupa.scrape import Jurisdiction, Organization\nfrom .people import CTPersomScraper\nfrom .bills import CTBillScraper\nfrom .events import CTEventScraper\n\nsettings = {\n 'SCRAPELIB_RPM': 20\n}\n\n\nclass Connecticut(Jurisdiction):\n division_id = \"ocd-division/country:us/state:ct\"\n classification = \"government\"\n name = \"Connecticut\"\n url = \"http://www.cga.ct.gov/\"\n scrapers = {\n 'people': CTPersomScraper,\n 'bills': CTBillScraper,\n 'events': CTEventScraper,\n }\n parties = [\n {'name': 'Republican'},\n {'name': 'Democratic'}\n ]\n 
legislative_sessions = [\n {\n \"_scraped_name\": \"2011\",\n \"identifier\": \"2011\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"2012\",\n \"identifier\": \"2012\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"2013\",\n \"identifier\": \"2013\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"2014\",\n \"identifier\": \"2014\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"2015\",\n \"identifier\": \"2015\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"2016\",\n \"end_date\": \"2016-05-04\",\n \"identifier\": \"2016\",\n \"name\": \"2016 Regular Session\",\n \"start_date\": \"2016-02-03\"\n },\n {\n \"_scraped_name\": \"2017\",\n \"identifier\": \"2017\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-01-04\",\n \"end_date\": \"2017-06-07\",\n },\n {\n \"_scraped_name\": \"2018\",\n \"identifier\": \"2018\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-01-10\",\n \"end_date\": \"2018-05-09\",\n },\n ]\n ignored_scraped_sessions = [\n \"2010\",\n \"2009\",\n \"2008\",\n \"2007\",\n \"2006\",\n \"2005\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Connecticut General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 151\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 36\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import scrapelib\n text = scrapelib.Scraper().get('ftp://ftp.cga.ct.gov').text\n sessions = [line.split()[-1] for line in text.splitlines()]\n\n for not_session_name in ('incoming', 'pub', 'CGAAudio', 'rba', 'NCSL', \"apaac\",\n 'FOI_1', 'stainedglass', ):\n sessions.remove(not_session_name)\n return sessions\n\n def get_extract_text(self, doc, data):\n doc = lxml.html.fromstring(data)\n text = ' '.join(p.text_content() for p in doc.xpath('//body/p'))\n return text\n", "path": "openstates/ct/__init__.py"}, {"content": "import datetime\n\n\nmetadata = {\n 'name': 'Connecticut',\n 'abbreviation': 'ct',\n 'legislature_name': 'Connecticut General Assembly',\n 'legislature_url': 'http://www.cga.ct.gov/',\n 'capitol_timezone': 'America/New_York',\n 'chambers': {\n 'upper': {'name': 'Senate', 'title': 'Senator'},\n 'lower': {'name': 'House', 'title': 'Representative'},\n },\n 'terms': [\n {\n 'name': '2011-2012',\n 'start_year': 2011,\n 'end_year': 2012,\n 'sessions': ['2011', '2012'],\n },\n {\n 'name': '2013-2014',\n 'start_year': 2013,\n 'end_year': 2014,\n 'sessions': ['2013', '2014'],\n },\n {\n 'name': '2015-2016',\n 'start_year': 2015,\n 'end_year': 2016,\n 'sessions': ['2015', '2016'],\n },\n {\n 'name': '2017-2018',\n 'start_year': 2017,\n 'end_year': 2018,\n 'sessions': ['2017', '2018'],\n },\n ],\n 'session_details': {\n '2011': {\n 'display_name': '2011 Regular Session',\n '_scraped_name': '2011',\n },\n '2012': {\n 'display_name': '2012 
Regular Session',\n '_scraped_name': '2012',\n },\n '2013': {\n 'display_name': '2013 Regular Session',\n '_scraped_name': '2013',\n },\n '2014': {\n 'display_name': '2014 Regular Session',\n '_scraped_name': '2014',\n },\n '2015': {\n 'display_name': '2015 Regular Session',\n '_scraped_name': '2015',\n },\n '2016': {\n 'display_name': '2016 Regular Session',\n 'start_date': datetime.date(2016, 2, 3),\n 'end_date': datetime.date(2016, 5, 4),\n '_scraped_name': '2016',\n },\n '2017': {\n 'display_name': '2017 Regular Session',\n '_scraped_name': '2017',\n },\n '2018': {\n 'display_name': '2018 Regular Session',\n 'start_date': datetime.date(2018, 1, 10),\n 'end_date': datetime.date(2018, 5, 9),\n '_scraped_name': '2018',\n },\n },\n 'feature_flags': ['subjects', 'events', 'influenceexplorer'],\n '_ignored_scraped_sessions': [\n '2010',\n '2009',\n '2008',\n '2007',\n '2006',\n '2005',\n ],\n}\n", "path": "billy_metadata/ct.py"}]}
| 2,535 | 463 |
gh_patches_debug_8788
|
rasdani/github-patches
|
git_diff
|
aws__sagemaker-python-sdk-978
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
transformer() method for an SKLearn estimator is failing
Please fill out the form below.
### System Information
- **Framework (e.g. TensorFlow) / Algorithm (e.g. KMeans)**: SKLearn
- **Framework Version**: sagemaker 1.36.1
- **Python Version**: 3.6
- **CPU or GPU**: CPU
- **Python SDK Version**: sagemaker 1.36.1
- **Are you using a custom image**: yes, using entry point script
### Describe the problem
An SKLearn estimator that is based on a custom algorithm (using entry_point script) was fitted and created a training job. Then, when calling the transformer method of that estimator with instance_count and instance_type parameters, a TypeError is thrown.
### Minimal repro / logs
- **Exact command to reproduce**:
sklearn = SKLearn(
    entry_point=script_path,
    train_instance_type="ml.m5.large",
    role=role,
    output_path=output_location,
    code_location=code_location,
    hyperparameters={'min_child_weight': 2, 'max_depth': 8})
transformer = sklearn.transformer(instance_count=1, instance_type='ml.m5.large')
Error:
TypeError: __init__() got multiple values for argument 'entry_point'
- **Stack trace:**
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-20-07fcf819e9f1> in <module>()
----> 1 transformer = sklearn.transformer(instance_count=1, instance_type='ml.m5.large')
~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/estimator.py in transformer(self, instance_count, instance_type, strategy, assemble_with, output_path, output_kms_key, accept, env, max_concurrent_transforms, max_payload, tags, role, model_server_workers, volume_kms_key, entry_point)
1573 if self.latest_training_job is not None:
1574 model = self.create_model(
-> 1575 role=role, model_server_workers=model_server_workers, entry_point=entry_point
1576 )
1577
~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/sklearn/estimator.py in create_model(self, model_server_workers, role, vpc_config_override, **kwargs)
167 sagemaker_session=self.sagemaker_session,
168 vpc_config=self.get_vpc_config(vpc_config_override),
--> 169 **kwargs
170 )
171
TypeError: __init__() got multiple values for argument 'entry_point'
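
The traceback boils down to ordinary Python keyword-argument rules: `create_model` forwards `self.entry_point` positionally to `SKLearnModel` while `**kwargs` still carries an `entry_point` key. A standalone toy example (no SageMaker dependency; all names here are made up) that reproduces the same error:

```python
class ToyModel:
    def __init__(self, model_data, role, entry_point, **kwargs):
        self.entry_point = entry_point


def toy_create_model(estimator_entry_point, **kwargs):
    # entry_point is supplied positionally here, so an 'entry_point' key
    # left inside **kwargs collides with it.
    return ToyModel("s3://bucket/model.tar.gz", "dummy-role",
                    estimator_entry_point, **kwargs)


toy_create_model("train.py", entry_point=None)
# TypeError: __init__() got multiple values for argument 'entry_point'
```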
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sagemaker/sklearn/estimator.py`
Content:
```
1 # Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """Placeholder docstring"""
14 from __future__ import absolute_import
15
16 import logging
17
18 from sagemaker.estimator import Framework
19 from sagemaker.fw_registry import default_framework_uri
20 from sagemaker.fw_utils import (
21 framework_name_from_image,
22 empty_framework_version_warning,
23 python_deprecation_warning,
24 )
25 from sagemaker.sklearn.defaults import SKLEARN_VERSION, SKLEARN_NAME
26 from sagemaker.sklearn.model import SKLearnModel
27 from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
28
29 logger = logging.getLogger("sagemaker")
30
31
32 class SKLearn(Framework):
33 """Handle end-to-end training and deployment of custom Scikit-learn code."""
34
35 __framework_name__ = SKLEARN_NAME
36
37 def __init__(
38 self,
39 entry_point,
40 framework_version=SKLEARN_VERSION,
41 source_dir=None,
42 hyperparameters=None,
43 py_version="py3",
44 image_name=None,
45 **kwargs
46 ):
47 """This ``Estimator`` executes an Scikit-learn script in a managed
48 Scikit-learn execution environment, within a SageMaker Training Job. The
49 managed Scikit-learn environment is an Amazon-built Docker container
50 that executes functions defined in the supplied ``entry_point`` Python
51 script.
52
53 Training is started by calling
54 :meth:`~sagemaker.amazon.estimator.Framework.fit` on this Estimator.
55 After training is complete, calling
56 :meth:`~sagemaker.amazon.estimator.Framework.deploy` creates a hosted
57 SageMaker endpoint and returns an
58 :class:`~sagemaker.amazon.sklearn.model.SKLearnPredictor` instance that
59 can be used to perform inference against the hosted model.
60
61 Technical documentation on preparing Scikit-learn scripts for
62 SageMaker training and using the Scikit-learn Estimator is available on
63 the project home-page: https://github.com/aws/sagemaker-python-sdk
64
65 Args:
66 entry_point (str): Path (absolute or relative) to the Python source
67 file which should be executed as the entry point to training.
68 This should be compatible with either Python 2.7 or Python 3.5.
69 framework_version (str): Scikit-learn version you want to use for
70 executing your model training code. List of supported versions
71 https://github.com/aws/sagemaker-python-sdk#sklearn-sagemaker-estimators
72 source_dir (str): Path (absolute or relative) to a directory with
73 any other training source code dependencies aside from tne entry
74 point file (default: None). Structure within this directory are
75 preserved when training on Amazon SageMaker.
76 hyperparameters (dict): Hyperparameters that will be used for
77 training (default: None). The hyperparameters are made
78 accessible as a dict[str, str] to the training code on
79 SageMaker. For convenience, this accepts other types for keys
80 and values, but ``str()`` will be called to convert them before
81 training.
82 py_version (str): Python version you want to use for executing your
83 model training code (default: 'py2'). One of 'py2' or 'py3'.
84 image_name (str): If specified, the estimator will use this image
85 for training and hosting, instead of selecting the appropriate
86 SageMaker official image based on framework_version and
87 py_version. It can be an ECR url or dockerhub image and tag.
88 Examples:
89 123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0
90 custom-image:latest.
91 **kwargs: Additional kwargs passed to the
92 :class:`~sagemaker.estimator.Framework` constructor.
93 """
94 # SciKit-Learn does not support distributed training or training on GPU instance types.
95 # Fail fast.
96 train_instance_type = kwargs.get("train_instance_type")
97 _validate_not_gpu_instance_type(train_instance_type)
98
99 train_instance_count = kwargs.get("train_instance_count")
100 if train_instance_count:
101 if train_instance_count != 1:
102 raise AttributeError(
103 "Scikit-Learn does not support distributed training. "
104 "Please remove the 'train_instance_count' argument or set "
105 "'train_instance_count=1' when initializing SKLearn."
106 )
107 super(SKLearn, self).__init__(
108 entry_point,
109 source_dir,
110 hyperparameters,
111 image_name=image_name,
112 **dict(kwargs, train_instance_count=1)
113 )
114
115 if py_version == "py2":
116 logger.warning(python_deprecation_warning(self.__framework_name__))
117
118 self.py_version = py_version
119
120 if framework_version is None:
121 logger.warning(empty_framework_version_warning(SKLEARN_VERSION, SKLEARN_VERSION))
122 self.framework_version = framework_version or SKLEARN_VERSION
123
124 if image_name is None:
125 image_tag = "{}-{}-{}".format(framework_version, "cpu", py_version)
126 self.image_name = default_framework_uri(
127 SKLearn.__framework_name__, self.sagemaker_session.boto_region_name, image_tag
128 )
129
130 def create_model(
131 self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT, **kwargs
132 ):
133 """Create a SageMaker ``SKLearnModel`` object that can be deployed to an
134 ``Endpoint``.
135
136 Args:
137 model_server_workers (int): Optional. The number of worker processes
138 used by the inference server. If None, server will use one
139 worker per vCPU.
140 role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
141 which is also used during transform jobs. If not specified, the
142 role from the Estimator will be used.
143 vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on
144 the model. Default: use subnets and security groups from this Estimator.
145 * 'Subnets' (list[str]): List of subnet ids.
146 * 'SecurityGroupIds' (list[str]): List of security group ids.
147 **kwargs: Passed to initialization of ``SKLearnModel``.
148
149 Returns:
150 sagemaker.sklearn.model.SKLearnModel: A SageMaker ``SKLearnModel``
151 object. See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.
152 """
153 role = role or self.role
154 return SKLearnModel(
155 self.model_data,
156 role,
157 self.entry_point,
158 source_dir=self._model_source_dir(),
159 enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,
160 name=self._current_job_name,
161 container_log_level=self.container_log_level,
162 code_location=self.code_location,
163 py_version=self.py_version,
164 framework_version=self.framework_version,
165 model_server_workers=model_server_workers,
166 image=self.image_name,
167 sagemaker_session=self.sagemaker_session,
168 vpc_config=self.get_vpc_config(vpc_config_override),
169 **kwargs
170 )
171
172 @classmethod
173 def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
174 """Convert the job description to init params that can be handled by the
175 class constructor
176
177 Args:
178 job_details: the returned job details from a describe_training_job
179 API call.
180 model_channel_name:
181
182 Returns:
183 dictionary: The transformed init_params
184 """
185 init_params = super(SKLearn, cls)._prepare_init_params_from_job_description(job_details)
186
187 image_name = init_params.pop("image")
188 framework, py_version, _, _ = framework_name_from_image(image_name)
189 init_params["py_version"] = py_version
190
191 if framework and framework != cls.__framework_name__:
192 training_job_name = init_params["base_job_name"]
193 raise ValueError(
194 "Training job: {} didn't use image for requested framework".format(
195 training_job_name
196 )
197 )
198 if not framework:
199 # If we were unable to parse the framework name from the image it is not one of our
200 # officially supported images, in this case just add the image to the init params.
201 init_params["image_name"] = image_name
202 return init_params
203
204
205 def _validate_not_gpu_instance_type(training_instance_type):
206 """
207 Args:
208 training_instance_type:
209 """
210 gpu_instance_types = [
211 "ml.p2.xlarge",
212 "ml.p2.8xlarge",
213 "ml.p2.16xlarge",
214 "ml.p3.xlarge",
215 "ml.p3.8xlarge",
216 "ml.p3.16xlarge",
217 ]
218
219 if training_instance_type in gpu_instance_types:
220 raise ValueError(
221 "GPU training in not supported for Scikit-Learn. "
222 "Please pick a different instance type from here: "
223 "https://aws.amazon.com/ec2/instance-types/"
224 )
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sagemaker/sklearn/estimator.py b/src/sagemaker/sklearn/estimator.py
--- a/src/sagemaker/sklearn/estimator.py
+++ b/src/sagemaker/sklearn/estimator.py
@@ -151,6 +151,12 @@
object. See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.
"""
role = role or self.role
+
+ # remove unwanted entry_point kwarg
+ if "entry_point" in kwargs:
+ logger.debug("removing unused entry_point argument: %s", str(kwargs["entry_point"]))
+ kwargs = {k: v for k, v in kwargs.items() if k != "entry_point"}
+
return SKLearnModel(
self.model_data,
role,
|
{"golden_diff": "diff --git a/src/sagemaker/sklearn/estimator.py b/src/sagemaker/sklearn/estimator.py\n--- a/src/sagemaker/sklearn/estimator.py\n+++ b/src/sagemaker/sklearn/estimator.py\n@@ -151,6 +151,12 @@\n object. See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.\n \"\"\"\n role = role or self.role\n+\n+ # remove unwanted entry_point kwarg\n+ if \"entry_point\" in kwargs:\n+ logger.debug(\"removing unused entry_point argument: %s\", str(kwargs[\"entry_point\"]))\n+ kwargs = {k: v for k, v in kwargs.items() if k != \"entry_point\"}\n+\n return SKLearnModel(\n self.model_data,\n role,\n", "issue": "transformer() method for an SKLearn estimator is failing\nPlease fill out the form below.\r\n\r\n### System Information\r\n- **Framework (e.g. TensorFlow) / Algorithm (e.g. KMeans)**: SKLearn\r\n- **Framework Version**: sagemaker 1.36.1\r\n- **Python Version**: 3.6\r\n- **CPU or GPU**: CPU\r\n- **Python SDK Version**: sagemaker 1.36.1\r\n- **Are you using a custom image**: yes, using entry point script\r\n\r\n### Describe the problem\r\nAn SKLearn estimator that is based on a custom algorithm (using entry_point script) was fitted and created a training job. Then, when calling the transformer method of that estimator with instance_count and instance_type parameters, a TypeError is thrown.\r\n\r\n### Minimal repro / logs\r\n- **Exact command to reproduce**:\r\nsklearn = SKLearn(\r\n entry_point=script_path,\r\n train_instance_type=\"ml.m5.large\",\r\n role=role,\r\n output_path =output_location,\r\n code_location=code_location,\r\n hyperparameters={'min_child_weight': 2, 'max_depth':8})\r\ntransformer = sklearn.transformer(instance_count=1, instance_type='ml.m5.large')\r\nError:\r\nTypeError: __init__() got multiple values for argument 'entry_point'\r\n\r\n- **Stack trace:**\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-20-07fcf819e9f1> in <module>()\r\n----> 1 transformer = sklearn.transformer(instance_count=1, instance_type='ml.m5.large')\r\n\r\n~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/estimator.py in transformer(self, instance_count, instance_type, strategy, assemble_with, output_path, output_kms_key, accept, env, max_concurrent_transforms, max_payload, tags, role, model_server_workers, volume_kms_key, entry_point)\r\n 1573 if self.latest_training_job is not None:\r\n 1574 model = self.create_model(\r\n-> 1575 role=role, model_server_workers=model_server_workers, entry_point=entry_point\r\n 1576 )\r\n 1577 \r\n\r\n~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/sklearn/estimator.py in create_model(self, model_server_workers, role, vpc_config_override, **kwargs)\r\n 167 sagemaker_session=self.sagemaker_session,\r\n 168 vpc_config=self.get_vpc_config(vpc_config_override),\r\n--> 169 **kwargs\r\n 170 )\r\n 171 \r\n\r\nTypeError: __init__() got multiple values for argument 'entry_point'\n", "before_files": [{"content": "# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Placeholder docstring\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom sagemaker.estimator import Framework\nfrom sagemaker.fw_registry import default_framework_uri\nfrom sagemaker.fw_utils import (\n framework_name_from_image,\n empty_framework_version_warning,\n python_deprecation_warning,\n)\nfrom sagemaker.sklearn.defaults import SKLEARN_VERSION, SKLEARN_NAME\nfrom sagemaker.sklearn.model import SKLearnModel\nfrom sagemaker.vpc_utils import VPC_CONFIG_DEFAULT\n\nlogger = logging.getLogger(\"sagemaker\")\n\n\nclass SKLearn(Framework):\n \"\"\"Handle end-to-end training and deployment of custom Scikit-learn code.\"\"\"\n\n __framework_name__ = SKLEARN_NAME\n\n def __init__(\n self,\n entry_point,\n framework_version=SKLEARN_VERSION,\n source_dir=None,\n hyperparameters=None,\n py_version=\"py3\",\n image_name=None,\n **kwargs\n ):\n \"\"\"This ``Estimator`` executes an Scikit-learn script in a managed\n Scikit-learn execution environment, within a SageMaker Training Job. The\n managed Scikit-learn environment is an Amazon-built Docker container\n that executes functions defined in the supplied ``entry_point`` Python\n script.\n\n Training is started by calling\n :meth:`~sagemaker.amazon.estimator.Framework.fit` on this Estimator.\n After training is complete, calling\n :meth:`~sagemaker.amazon.estimator.Framework.deploy` creates a hosted\n SageMaker endpoint and returns an\n :class:`~sagemaker.amazon.sklearn.model.SKLearnPredictor` instance that\n can be used to perform inference against the hosted model.\n\n Technical documentation on preparing Scikit-learn scripts for\n SageMaker training and using the Scikit-learn Estimator is available on\n the project home-page: https://github.com/aws/sagemaker-python-sdk\n\n Args:\n entry_point (str): Path (absolute or relative) to the Python source\n file which should be executed as the entry point to training.\n This should be compatible with either Python 2.7 or Python 3.5.\n framework_version (str): Scikit-learn version you want to use for\n executing your model training code. List of supported versions\n https://github.com/aws/sagemaker-python-sdk#sklearn-sagemaker-estimators\n source_dir (str): Path (absolute or relative) to a directory with\n any other training source code dependencies aside from tne entry\n point file (default: None). Structure within this directory are\n preserved when training on Amazon SageMaker.\n hyperparameters (dict): Hyperparameters that will be used for\n training (default: None). The hyperparameters are made\n accessible as a dict[str, str] to the training code on\n SageMaker. For convenience, this accepts other types for keys\n and values, but ``str()`` will be called to convert them before\n training.\n py_version (str): Python version you want to use for executing your\n model training code (default: 'py2'). One of 'py2' or 'py3'.\n image_name (str): If specified, the estimator will use this image\n for training and hosting, instead of selecting the appropriate\n SageMaker official image based on framework_version and\n py_version. 
It can be an ECR url or dockerhub image and tag.\n Examples:\n 123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0\n custom-image:latest.\n **kwargs: Additional kwargs passed to the\n :class:`~sagemaker.estimator.Framework` constructor.\n \"\"\"\n # SciKit-Learn does not support distributed training or training on GPU instance types.\n # Fail fast.\n train_instance_type = kwargs.get(\"train_instance_type\")\n _validate_not_gpu_instance_type(train_instance_type)\n\n train_instance_count = kwargs.get(\"train_instance_count\")\n if train_instance_count:\n if train_instance_count != 1:\n raise AttributeError(\n \"Scikit-Learn does not support distributed training. \"\n \"Please remove the 'train_instance_count' argument or set \"\n \"'train_instance_count=1' when initializing SKLearn.\"\n )\n super(SKLearn, self).__init__(\n entry_point,\n source_dir,\n hyperparameters,\n image_name=image_name,\n **dict(kwargs, train_instance_count=1)\n )\n\n if py_version == \"py2\":\n logger.warning(python_deprecation_warning(self.__framework_name__))\n\n self.py_version = py_version\n\n if framework_version is None:\n logger.warning(empty_framework_version_warning(SKLEARN_VERSION, SKLEARN_VERSION))\n self.framework_version = framework_version or SKLEARN_VERSION\n\n if image_name is None:\n image_tag = \"{}-{}-{}\".format(framework_version, \"cpu\", py_version)\n self.image_name = default_framework_uri(\n SKLearn.__framework_name__, self.sagemaker_session.boto_region_name, image_tag\n )\n\n def create_model(\n self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT, **kwargs\n ):\n \"\"\"Create a SageMaker ``SKLearnModel`` object that can be deployed to an\n ``Endpoint``.\n\n Args:\n model_server_workers (int): Optional. The number of worker processes\n used by the inference server. If None, server will use one\n worker per vCPU.\n role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,\n which is also used during transform jobs. If not specified, the\n role from the Estimator will be used.\n vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on\n the model. Default: use subnets and security groups from this Estimator.\n * 'Subnets' (list[str]): List of subnet ids.\n * 'SecurityGroupIds' (list[str]): List of security group ids.\n **kwargs: Passed to initialization of ``SKLearnModel``.\n\n Returns:\n sagemaker.sklearn.model.SKLearnModel: A SageMaker ``SKLearnModel``\n object. 
See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.\n \"\"\"\n role = role or self.role\n return SKLearnModel(\n self.model_data,\n role,\n self.entry_point,\n source_dir=self._model_source_dir(),\n enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,\n name=self._current_job_name,\n container_log_level=self.container_log_level,\n code_location=self.code_location,\n py_version=self.py_version,\n framework_version=self.framework_version,\n model_server_workers=model_server_workers,\n image=self.image_name,\n sagemaker_session=self.sagemaker_session,\n vpc_config=self.get_vpc_config(vpc_config_override),\n **kwargs\n )\n\n @classmethod\n def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n \"\"\"Convert the job description to init params that can be handled by the\n class constructor\n\n Args:\n job_details: the returned job details from a describe_training_job\n API call.\n model_channel_name:\n\n Returns:\n dictionary: The transformed init_params\n \"\"\"\n init_params = super(SKLearn, cls)._prepare_init_params_from_job_description(job_details)\n\n image_name = init_params.pop(\"image\")\n framework, py_version, _, _ = framework_name_from_image(image_name)\n init_params[\"py_version\"] = py_version\n\n if framework and framework != cls.__framework_name__:\n training_job_name = init_params[\"base_job_name\"]\n raise ValueError(\n \"Training job: {} didn't use image for requested framework\".format(\n training_job_name\n )\n )\n if not framework:\n # If we were unable to parse the framework name from the image it is not one of our\n # officially supported images, in this case just add the image to the init params.\n init_params[\"image_name\"] = image_name\n return init_params\n\n\ndef _validate_not_gpu_instance_type(training_instance_type):\n \"\"\"\n Args:\n training_instance_type:\n \"\"\"\n gpu_instance_types = [\n \"ml.p2.xlarge\",\n \"ml.p2.8xlarge\",\n \"ml.p2.16xlarge\",\n \"ml.p3.xlarge\",\n \"ml.p3.8xlarge\",\n \"ml.p3.16xlarge\",\n ]\n\n if training_instance_type in gpu_instance_types:\n raise ValueError(\n \"GPU training in not supported for Scikit-Learn. \"\n \"Please pick a different instance type from here: \"\n \"https://aws.amazon.com/ec2/instance-types/\"\n )\n", "path": "src/sagemaker/sklearn/estimator.py"}], "after_files": [{"content": "# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Placeholder docstring\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom sagemaker.estimator import Framework\nfrom sagemaker.fw_registry import default_framework_uri\nfrom sagemaker.fw_utils import (\n framework_name_from_image,\n empty_framework_version_warning,\n python_deprecation_warning,\n)\nfrom sagemaker.sklearn.defaults import SKLEARN_VERSION, SKLEARN_NAME\nfrom sagemaker.sklearn.model import SKLearnModel\nfrom sagemaker.vpc_utils import VPC_CONFIG_DEFAULT\n\nlogger = logging.getLogger(\"sagemaker\")\n\n\nclass SKLearn(Framework):\n \"\"\"Handle end-to-end training and deployment of custom Scikit-learn code.\"\"\"\n\n __framework_name__ = SKLEARN_NAME\n\n def __init__(\n self,\n entry_point,\n framework_version=SKLEARN_VERSION,\n source_dir=None,\n hyperparameters=None,\n py_version=\"py3\",\n image_name=None,\n **kwargs\n ):\n \"\"\"This ``Estimator`` executes an Scikit-learn script in a managed\n Scikit-learn execution environment, within a SageMaker Training Job. The\n managed Scikit-learn environment is an Amazon-built Docker container\n that executes functions defined in the supplied ``entry_point`` Python\n script.\n\n Training is started by calling\n :meth:`~sagemaker.amazon.estimator.Framework.fit` on this Estimator.\n After training is complete, calling\n :meth:`~sagemaker.amazon.estimator.Framework.deploy` creates a hosted\n SageMaker endpoint and returns an\n :class:`~sagemaker.amazon.sklearn.model.SKLearnPredictor` instance that\n can be used to perform inference against the hosted model.\n\n Technical documentation on preparing Scikit-learn scripts for\n SageMaker training and using the Scikit-learn Estimator is available on\n the project home-page: https://github.com/aws/sagemaker-python-sdk\n\n Args:\n entry_point (str): Path (absolute or relative) to the Python source\n file which should be executed as the entry point to training.\n This should be compatible with either Python 2.7 or Python 3.5.\n framework_version (str): Scikit-learn version you want to use for\n executing your model training code. List of supported versions\n https://github.com/aws/sagemaker-python-sdk#sklearn-sagemaker-estimators\n source_dir (str): Path (absolute or relative) to a directory with\n any other training source code dependencies aside from tne entry\n point file (default: None). Structure within this directory are\n preserved when training on Amazon SageMaker.\n hyperparameters (dict): Hyperparameters that will be used for\n training (default: None). The hyperparameters are made\n accessible as a dict[str, str] to the training code on\n SageMaker. For convenience, this accepts other types for keys\n and values, but ``str()`` will be called to convert them before\n training.\n py_version (str): Python version you want to use for executing your\n model training code (default: 'py2'). One of 'py2' or 'py3'.\n image_name (str): If specified, the estimator will use this image\n for training and hosting, instead of selecting the appropriate\n SageMaker official image based on framework_version and\n py_version. 
It can be an ECR url or dockerhub image and tag.\n Examples:\n 123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0\n custom-image:latest.\n **kwargs: Additional kwargs passed to the\n :class:`~sagemaker.estimator.Framework` constructor.\n \"\"\"\n # SciKit-Learn does not support distributed training or training on GPU instance types.\n # Fail fast.\n train_instance_type = kwargs.get(\"train_instance_type\")\n _validate_not_gpu_instance_type(train_instance_type)\n\n train_instance_count = kwargs.get(\"train_instance_count\")\n if train_instance_count:\n if train_instance_count != 1:\n raise AttributeError(\n \"Scikit-Learn does not support distributed training. \"\n \"Please remove the 'train_instance_count' argument or set \"\n \"'train_instance_count=1' when initializing SKLearn.\"\n )\n super(SKLearn, self).__init__(\n entry_point,\n source_dir,\n hyperparameters,\n image_name=image_name,\n **dict(kwargs, train_instance_count=1)\n )\n\n if py_version == \"py2\":\n logger.warning(python_deprecation_warning(self.__framework_name__))\n\n self.py_version = py_version\n\n if framework_version is None:\n logger.warning(empty_framework_version_warning(SKLEARN_VERSION, SKLEARN_VERSION))\n self.framework_version = framework_version or SKLEARN_VERSION\n\n if image_name is None:\n image_tag = \"{}-{}-{}\".format(framework_version, \"cpu\", py_version)\n self.image_name = default_framework_uri(\n SKLearn.__framework_name__, self.sagemaker_session.boto_region_name, image_tag\n )\n\n def create_model(\n self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT, **kwargs\n ):\n \"\"\"Create a SageMaker ``SKLearnModel`` object that can be deployed to an\n ``Endpoint``.\n\n Args:\n model_server_workers (int): Optional. The number of worker processes\n used by the inference server. If None, server will use one\n worker per vCPU.\n role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,\n which is also used during transform jobs. If not specified, the\n role from the Estimator will be used.\n vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on\n the model. Default: use subnets and security groups from this Estimator.\n * 'Subnets' (list[str]): List of subnet ids.\n * 'SecurityGroupIds' (list[str]): List of security group ids.\n **kwargs: Passed to initialization of ``SKLearnModel``.\n\n Returns:\n sagemaker.sklearn.model.SKLearnModel: A SageMaker ``SKLearnModel``\n object. 
See :func:`~sagemaker.sklearn.model.SKLearnModel` for full details.\n \"\"\"\n role = role or self.role\n\n # remove unwanted entry_point kwarg\n if \"entry_point\" in kwargs:\n logger.debug(\"removing unused entry_point argument: %s\", str(kwargs[\"entry_point\"]))\n kwargs = {k: v for k, v in kwargs.items() if k != \"entry_point\"}\n\n return SKLearnModel(\n self.model_data,\n role,\n self.entry_point,\n source_dir=self._model_source_dir(),\n enable_cloudwatch_metrics=self.enable_cloudwatch_metrics,\n name=self._current_job_name,\n container_log_level=self.container_log_level,\n code_location=self.code_location,\n py_version=self.py_version,\n framework_version=self.framework_version,\n model_server_workers=model_server_workers,\n image=self.image_name,\n sagemaker_session=self.sagemaker_session,\n vpc_config=self.get_vpc_config(vpc_config_override),\n **kwargs\n )\n\n @classmethod\n def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n \"\"\"Convert the job description to init params that can be handled by the\n class constructor\n\n Args:\n job_details: the returned job details from a describe_training_job\n API call.\n model_channel_name:\n\n Returns:\n dictionary: The transformed init_params\n \"\"\"\n init_params = super(SKLearn, cls)._prepare_init_params_from_job_description(job_details)\n\n image_name = init_params.pop(\"image\")\n framework, py_version, _, _ = framework_name_from_image(image_name)\n init_params[\"py_version\"] = py_version\n\n if framework and framework != cls.__framework_name__:\n training_job_name = init_params[\"base_job_name\"]\n raise ValueError(\n \"Training job: {} didn't use image for requested framework\".format(\n training_job_name\n )\n )\n if not framework:\n # If we were unable to parse the framework name from the image it is not one of our\n # officially supported images, in this case just add the image to the init params.\n init_params[\"image_name\"] = image_name\n return init_params\n\n\ndef _validate_not_gpu_instance_type(training_instance_type):\n \"\"\"\n Args:\n training_instance_type:\n \"\"\"\n gpu_instance_types = [\n \"ml.p2.xlarge\",\n \"ml.p2.8xlarge\",\n \"ml.p2.16xlarge\",\n \"ml.p3.xlarge\",\n \"ml.p3.8xlarge\",\n \"ml.p3.16xlarge\",\n ]\n\n if training_instance_type in gpu_instance_types:\n raise ValueError(\n \"GPU training in not supported for Scikit-Learn. \"\n \"Please pick a different instance type from here: \"\n \"https://aws.amazon.com/ec2/instance-types/\"\n )\n", "path": "src/sagemaker/sklearn/estimator.py"}]}
| 3,457 | 182 |
| gh_patches_debug_1743 | rasdani/github-patches | git_diff | conan-io__conan-3816 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conan crash if .count-files consist NULL-character
conan version 1.8.0, Windows 7 x64
For some reason (maybe a crash), the .count-files in each conan package in the cache were corrupted and contained a unicode NULL character (https://www.fileformat.info/info/unicode/char/0000/index.htm). This led to a conan crash for basically every package action (e.g. conan info).
I already prepared a fix (https://github.com/conan-io/conan/compare/develop...pianoslum:develop) which also adds a little bit more verbosity in case there is no number in the .count-file.
Is this enough info or should I elaborate?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/util/locks.py`
Content:
```
1 import fasteners
2 from conans.util.log import logger
3 import time
4 from conans.util.files import save, load
5 import os
6
7
8 class NoLock(object):
9
10 def __enter__(self):
11 pass
12
13 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable
14 pass
15
16
17 class SimpleLock(object):
18
19 def __init__(self, filename):
20 self._lock = fasteners.InterProcessLock(filename, logger=logger)
21
22 def __enter__(self):
23 self._lock.acquire()
24
25 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable
26 self._lock.release()
27
28
29 READ_BUSY_DELAY = 0.5
30 WRITE_BUSY_DELAY = 0.25
31
32
33 class Lock(object):
34
35 @staticmethod
36 def clean(folder):
37 if os.path.exists(folder + ".count"):
38 os.remove(folder + ".count")
39 if os.path.exists(folder + ".count.lock"):
40 os.remove(folder + ".count.lock")
41
42 def __init__(self, folder, locked_item, output):
43 self._count_file = folder + ".count"
44 self._count_lock_file = folder + ".count.lock"
45 self._locked_item = locked_item
46 self._output = output
47 self._first_lock = True
48
49 @property
50 def files(self):
51 return (self._count_file, self._count_lock_file)
52
53 def _info_locked(self):
54 if self._first_lock:
55 self._first_lock = False
56 self._output.info("%s is locked by another concurrent conan process, wait..."
57 % str(self._locked_item))
58 self._output.info("If not the case, quit, and do 'conan remove --locks'")
59
60 def _readers(self):
61 try:
62 return int(load(self._count_file))
63 except IOError:
64 return 0
65
66
67 class ReadLock(Lock):
68
69 def __enter__(self):
70 while True:
71 with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
72 readers = self._readers()
73 if readers >= 0:
74 save(self._count_file, str(readers + 1))
75 break
76 self._info_locked()
77 time.sleep(READ_BUSY_DELAY)
78
79 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable
80 with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
81 readers = self._readers()
82 save(self._count_file, str(readers - 1))
83
84
85 class WriteLock(Lock):
86
87 def __enter__(self):
88 while True:
89 with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
90 readers = self._readers()
91 if readers == 0:
92 save(self._count_file, "-1")
93 break
94 self._info_locked()
95 time.sleep(WRITE_BUSY_DELAY)
96
97 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable
98 with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
99 save(self._count_file, "0")
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/util/locks.py b/conans/util/locks.py
--- a/conans/util/locks.py
+++ b/conans/util/locks.py
@@ -60,7 +60,8 @@
def _readers(self):
try:
return int(load(self._count_file))
- except IOError:
+ except (IOError, UnicodeEncodeError, ValueError):
+ self._output.warn("%s does not contain a number!" % self._count_file)
return 0
|
{"golden_diff": "diff --git a/conans/util/locks.py b/conans/util/locks.py\n--- a/conans/util/locks.py\n+++ b/conans/util/locks.py\n@@ -60,7 +60,8 @@\n def _readers(self):\n try:\n return int(load(self._count_file))\n- except IOError:\n+ except (IOError, UnicodeEncodeError, ValueError):\n+ self._output.warn(\"%s does not contain a number!\" % self._count_file)\n return 0\n", "issue": "Conan crash if .count-files consist NULL-character\nconan version 1.8.0, Windows 7 x64\r\n\r\nFor some reason (maybe a crash), the .count-files in each conan package in the cache were corrupted and contained a unicode NULL character (https://www.fileformat.info/info/unicode/char/0000/index.htm). This led to a conan crash for basically every package action (e.g. conan info).\r\n\r\nI already prepared a fix (https://github.com/conan-io/conan/compare/develop...pianoslum:develop) which also adds a little bit more verbosity in case there is no number in the .count-file.\r\n\r\nIs this enough info or should I elaborate?\n", "before_files": [{"content": "import fasteners\nfrom conans.util.log import logger\nimport time\nfrom conans.util.files import save, load\nimport os\n\n\nclass NoLock(object):\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n pass\n\n\nclass SimpleLock(object):\n\n def __init__(self, filename):\n self._lock = fasteners.InterProcessLock(filename, logger=logger)\n\n def __enter__(self):\n self._lock.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n self._lock.release()\n\n\nREAD_BUSY_DELAY = 0.5\nWRITE_BUSY_DELAY = 0.25\n\n\nclass Lock(object):\n\n @staticmethod\n def clean(folder):\n if os.path.exists(folder + \".count\"):\n os.remove(folder + \".count\")\n if os.path.exists(folder + \".count.lock\"):\n os.remove(folder + \".count.lock\")\n\n def __init__(self, folder, locked_item, output):\n self._count_file = folder + \".count\"\n self._count_lock_file = folder + \".count.lock\"\n self._locked_item = locked_item\n self._output = output\n self._first_lock = True\n\n @property\n def files(self):\n return (self._count_file, self._count_lock_file)\n\n def _info_locked(self):\n if self._first_lock:\n self._first_lock = False\n self._output.info(\"%s is locked by another concurrent conan process, wait...\"\n % str(self._locked_item))\n self._output.info(\"If not the case, quit, and do 'conan remove --locks'\")\n\n def _readers(self):\n try:\n return int(load(self._count_file))\n except IOError:\n return 0\n\n\nclass ReadLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers >= 0:\n save(self._count_file, str(readers + 1))\n break\n self._info_locked()\n time.sleep(READ_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n save(self._count_file, str(readers - 1))\n\n\nclass WriteLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers == 0:\n save(self._count_file, \"-1\")\n break\n self._info_locked()\n time.sleep(WRITE_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n save(self._count_file, \"0\")\n", "path": "conans/util/locks.py"}], "after_files": [{"content": "import 
fasteners\nfrom conans.util.log import logger\nimport time\nfrom conans.util.files import save, load\nimport os\n\n\nclass NoLock(object):\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n pass\n\n\nclass SimpleLock(object):\n\n def __init__(self, filename):\n self._lock = fasteners.InterProcessLock(filename, logger=logger)\n\n def __enter__(self):\n self._lock.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n self._lock.release()\n\n\nREAD_BUSY_DELAY = 0.5\nWRITE_BUSY_DELAY = 0.25\n\n\nclass Lock(object):\n\n @staticmethod\n def clean(folder):\n if os.path.exists(folder + \".count\"):\n os.remove(folder + \".count\")\n if os.path.exists(folder + \".count.lock\"):\n os.remove(folder + \".count.lock\")\n\n def __init__(self, folder, locked_item, output):\n self._count_file = folder + \".count\"\n self._count_lock_file = folder + \".count.lock\"\n self._locked_item = locked_item\n self._output = output\n self._first_lock = True\n\n @property\n def files(self):\n return (self._count_file, self._count_lock_file)\n\n def _info_locked(self):\n if self._first_lock:\n self._first_lock = False\n self._output.info(\"%s is locked by another concurrent conan process, wait...\"\n % str(self._locked_item))\n self._output.info(\"If not the case, quit, and do 'conan remove --locks'\")\n\n def _readers(self):\n try:\n return int(load(self._count_file))\n except (IOError, UnicodeEncodeError, ValueError):\n self._output.warn(\"%s does not contain a number!\" % self._count_file)\n return 0\n\n\nclass ReadLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers >= 0:\n save(self._count_file, str(readers + 1))\n break\n self._info_locked()\n time.sleep(READ_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n save(self._count_file, str(readers - 1))\n\n\nclass WriteLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers == 0:\n save(self._count_file, \"-1\")\n break\n self._info_locked()\n time.sleep(WRITE_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n save(self._count_file, \"0\")\n", "path": "conans/util/locks.py"}]}
| 1,289 | 110 |
| gh_patches_debug_15170 | rasdani/github-patches | git_diff | dmlc__dgl-1082 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AmazonCoBuy dataset typo in API document
## 📚 Documentation
In API reference of `dgl.data.AmazonCoBuy` at https://docs.dgl.ai/en/latest/api/python/data.html#dgl.data.AmazonCoBuy, parameter `name` has to be ‘computer’ or ‘photo’. However, it should be ‘computer**s**’ or ‘photo’ according to the code:
https://github.com/dmlc/dgl/blob/fa0ee46a0146d21f46427fd9288dfe18074d6109/python/dgl/data/gnn_benckmark.py#L107-L125
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/dgl/data/gnn_benckmark.py`
Content:
```
1 import scipy.sparse as sp
2 import numpy as np
3 from dgl import graph_index, DGLGraph, transform
4 import os
5 from .utils import download, extract_archive, get_download_dir, _get_dgl_url
6
7 __all__=["AmazonCoBuy", "Coauthor", 'CoraFull']
8
9 def eliminate_self_loops(A):
10 """Remove self-loops from the adjacency matrix."""
11 A = A.tolil()
12 A.setdiag(0)
13 A = A.tocsr()
14 A.eliminate_zeros()
15 return A
16
17
18 class GNNBenchmarkDataset(object):
19 """Base Class for GNN Benchmark dataset from https://github.com/shchur/gnn-benchmark#datasets"""
20 _url = {}
21
22 def __init__(self, name):
23 assert name.lower() in self._url, "Name not valid"
24 self.dir = get_download_dir()
25 self.path = os.path.join(
26 self.dir, 'gnn_benckmark', self._url[name.lower()].split('/')[-1])
27 download(self._url[name.lower()], path=self.path)
28 g = self.load_npz(self.path)
29 self.data = [g]
30
31 @staticmethod
32 def load_npz(file_name):
33 with np.load(file_name) as loader:
34 loader = dict(loader)
35 num_nodes = loader['adj_shape'][0]
36 adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),
37 shape=loader['adj_shape']).tocoo()
38
39 if 'attr_data' in loader:
40 # Attributes are stored as a sparse CSR matrix
41 attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),
42 shape=loader['attr_shape']).todense()
43 elif 'attr_matrix' in loader:
44 # Attributes are stored as a (dense) np.ndarray
45 attr_matrix = loader['attr_matrix']
46 else:
47 attr_matrix = None
48
49 if 'labels_data' in loader:
50 # Labels are stored as a CSR matrix
51 labels = sp.csr_matrix((loader['labels_data'], loader['labels_indices'], loader['labels_indptr']),
52 shape=loader['labels_shape']).todense()
53 elif 'labels' in loader:
54 # Labels are stored as a numpy array
55 labels = loader['labels']
56 else:
57 labels = None
58 g = DGLGraph()
59 g.add_nodes(num_nodes)
60 g.add_edges(adj_matrix.row, adj_matrix.col)
61 g.add_edges(adj_matrix.col, adj_matrix.row)
62 g.ndata['feat'] = attr_matrix
63 g.ndata['label'] = labels
64 return g
65
66 def __getitem__(self, idx):
67 assert idx == 0, "This dataset has only one graph"
68 return self.data[0]
69
70 def __len__(self):
71 return len(self.data)
72
73
74 class CoraFull(GNNBenchmarkDataset):
75 r"""
76 Extended Cora dataset from `Deep Gaussian Embedding of Graphs:
77 Unsupervised Inductive Learning via Ranking`. Nodes represent paper and edges represent citations.
78
79 Reference: https://github.com/shchur/gnn-benchmark#datasets
80 """
81 _url = {"cora_full":'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/cora_full.npz'}
82
83 def __init__(self):
84 super().__init__("cora_full")
85
86
87 class Coauthor(GNNBenchmarkDataset):
88 r"""
89 Coauthor CS and Coauthor Physics are co-authorship graphs based on the Microsoft Academic Graph
90 from the KDD Cup 2016 challenge 3
91 . Here, nodes are authors, that are connected by an edge if they
92 co-authored a paper; node features represent paper keywords for each author’s papers, and class
93 labels indicate most active fields of study for each author.
94
95 Parameters
96 ---------------
97 name: str
98 Name of the dataset, has to be 'cs' or 'physics'
99
100 """
101 _url = {
102 'cs': "https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_cs.npz",
103 'physics': "https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_phy.npz"
104 }
105
106
107 class AmazonCoBuy(GNNBenchmarkDataset):
108 r"""
109 Amazon Computers and Amazon Photo are segments of the Amazon co-purchase graph [McAuley
110 et al., 2015], where nodes represent goods, edges indicate that two goods are frequently bought
111 together, node features are bag-of-words encoded product reviews, and class labels are given by the
112 product category.
113
114 Reference: https://github.com/shchur/gnn-benchmark#datasets
115
116 Parameters
117 ---------------
118 name: str
119 Name of the dataset, has to be 'computer' or 'photo'
120
121 """
122 _url = {
123 'computers': "https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_computers.npz",
124 'photo': "https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_photo.npz"
125 }
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/dgl/data/gnn_benckmark.py b/python/dgl/data/gnn_benckmark.py
--- a/python/dgl/data/gnn_benckmark.py
+++ b/python/dgl/data/gnn_benckmark.py
@@ -30,7 +30,7 @@
@staticmethod
def load_npz(file_name):
- with np.load(file_name) as loader:
+ with np.load(file_name, allow_pickle=True) as loader:
loader = dict(loader)
num_nodes = loader['adj_shape'][0]
adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),
@@ -116,7 +116,7 @@
Parameters
---------------
name: str
- Name of the dataset, has to be 'computer' or 'photo'
+ Name of the dataset, has to be 'computers' or 'photo'
"""
_url = {
|
{"golden_diff": "diff --git a/python/dgl/data/gnn_benckmark.py b/python/dgl/data/gnn_benckmark.py\n--- a/python/dgl/data/gnn_benckmark.py\n+++ b/python/dgl/data/gnn_benckmark.py\n@@ -30,7 +30,7 @@\n \n @staticmethod\n def load_npz(file_name):\n- with np.load(file_name) as loader:\n+ with np.load(file_name, allow_pickle=True) as loader:\n loader = dict(loader)\n num_nodes = loader['adj_shape'][0]\n adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),\n@@ -116,7 +116,7 @@\n Parameters\n ---------------\n name: str\n- Name of the dataset, has to be 'computer' or 'photo'\n+ Name of the dataset, has to be 'computers' or 'photo'\n \n \"\"\"\n _url = {\n", "issue": "AmazonCoBuy dataset typo in API document\n## \ud83d\udcda Documentation\r\n\r\nIn API reference of `dgl.data.AmazonCoBuy` at https://docs.dgl.ai/en/latest/api/python/data.html#dgl.data.AmazonCoBuy, parameter `name` has to be \u2018computer\u2019 or \u2018photo\u2019. However, it should be \u2018computer**s**\u2019 or \u2018photo\u2019 according to the code:\r\n\r\nhttps://github.com/dmlc/dgl/blob/fa0ee46a0146d21f46427fd9288dfe18074d6109/python/dgl/data/gnn_benckmark.py#L107-L125\r\n\r\n\n", "before_files": [{"content": "import scipy.sparse as sp\nimport numpy as np\nfrom dgl import graph_index, DGLGraph, transform\nimport os\nfrom .utils import download, extract_archive, get_download_dir, _get_dgl_url\n\n__all__=[\"AmazonCoBuy\", \"Coauthor\", 'CoraFull']\n\ndef eliminate_self_loops(A):\n \"\"\"Remove self-loops from the adjacency matrix.\"\"\"\n A = A.tolil()\n A.setdiag(0)\n A = A.tocsr()\n A.eliminate_zeros()\n return A\n\n\nclass GNNBenchmarkDataset(object):\n \"\"\"Base Class for GNN Benchmark dataset from https://github.com/shchur/gnn-benchmark#datasets\"\"\"\n _url = {}\n\n def __init__(self, name):\n assert name.lower() in self._url, \"Name not valid\"\n self.dir = get_download_dir()\n self.path = os.path.join(\n self.dir, 'gnn_benckmark', self._url[name.lower()].split('/')[-1])\n download(self._url[name.lower()], path=self.path)\n g = self.load_npz(self.path)\n self.data = [g]\n\n @staticmethod\n def load_npz(file_name):\n with np.load(file_name) as loader:\n loader = dict(loader)\n num_nodes = loader['adj_shape'][0]\n adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),\n shape=loader['adj_shape']).tocoo()\n\n if 'attr_data' in loader:\n # Attributes are stored as a sparse CSR matrix\n attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),\n shape=loader['attr_shape']).todense()\n elif 'attr_matrix' in loader:\n # Attributes are stored as a (dense) np.ndarray\n attr_matrix = loader['attr_matrix']\n else:\n attr_matrix = None\n\n if 'labels_data' in loader:\n # Labels are stored as a CSR matrix\n labels = sp.csr_matrix((loader['labels_data'], loader['labels_indices'], loader['labels_indptr']),\n shape=loader['labels_shape']).todense()\n elif 'labels' in loader:\n # Labels are stored as a numpy array\n labels = loader['labels']\n else:\n labels = None\n g = DGLGraph()\n g.add_nodes(num_nodes)\n g.add_edges(adj_matrix.row, adj_matrix.col)\n g.add_edges(adj_matrix.col, adj_matrix.row)\n g.ndata['feat'] = attr_matrix\n g.ndata['label'] = labels\n return g \n\n def __getitem__(self, idx):\n assert idx == 0, \"This dataset has only one graph\"\n return self.data[0]\n\n def __len__(self):\n return len(self.data)\n\n\nclass CoraFull(GNNBenchmarkDataset):\n r\"\"\"\n Extended Cora dataset from `Deep Gaussian 
Embedding of Graphs: \n Unsupervised Inductive Learning via Ranking`. Nodes represent paper and edges represent citations.\n\n Reference: https://github.com/shchur/gnn-benchmark#datasets\n \"\"\"\n _url = {\"cora_full\":'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/cora_full.npz'}\n\n def __init__(self):\n super().__init__(\"cora_full\")\n\n\nclass Coauthor(GNNBenchmarkDataset):\n r\"\"\"\n Coauthor CS and Coauthor Physics are co-authorship graphs based on the Microsoft Academic Graph\n from the KDD Cup 2016 challenge 3\n . Here, nodes are authors, that are connected by an edge if they\n co-authored a paper; node features represent paper keywords for each author\u2019s papers, and class\n labels indicate most active fields of study for each author.\n\n Parameters\n ---------------\n name: str\n Name of the dataset, has to be 'cs' or 'physics'\n\n \"\"\"\n _url = {\n 'cs': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_cs.npz\",\n 'physics': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_phy.npz\"\n }\n\n\nclass AmazonCoBuy(GNNBenchmarkDataset):\n r\"\"\"\n Amazon Computers and Amazon Photo are segments of the Amazon co-purchase graph [McAuley\n et al., 2015], where nodes represent goods, edges indicate that two goods are frequently bought\n together, node features are bag-of-words encoded product reviews, and class labels are given by the\n product category.\n\n Reference: https://github.com/shchur/gnn-benchmark#datasets\n\n Parameters\n ---------------\n name: str\n Name of the dataset, has to be 'computer' or 'photo'\n\n \"\"\"\n _url = {\n 'computers': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_computers.npz\",\n 'photo': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_photo.npz\"\n }\n", "path": "python/dgl/data/gnn_benckmark.py"}], "after_files": [{"content": "import scipy.sparse as sp\nimport numpy as np\nfrom dgl import graph_index, DGLGraph, transform\nimport os\nfrom .utils import download, extract_archive, get_download_dir, _get_dgl_url\n\n__all__=[\"AmazonCoBuy\", \"Coauthor\", 'CoraFull']\n\ndef eliminate_self_loops(A):\n \"\"\"Remove self-loops from the adjacency matrix.\"\"\"\n A = A.tolil()\n A.setdiag(0)\n A = A.tocsr()\n A.eliminate_zeros()\n return A\n\n\nclass GNNBenchmarkDataset(object):\n \"\"\"Base Class for GNN Benchmark dataset from https://github.com/shchur/gnn-benchmark#datasets\"\"\"\n _url = {}\n\n def __init__(self, name):\n assert name.lower() in self._url, \"Name not valid\"\n self.dir = get_download_dir()\n self.path = os.path.join(\n self.dir, 'gnn_benckmark', self._url[name.lower()].split('/')[-1])\n download(self._url[name.lower()], path=self.path)\n g = self.load_npz(self.path)\n self.data = [g]\n\n @staticmethod\n def load_npz(file_name):\n with np.load(file_name, allow_pickle=True) as loader:\n loader = dict(loader)\n num_nodes = loader['adj_shape'][0]\n adj_matrix = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),\n shape=loader['adj_shape']).tocoo()\n\n if 'attr_data' in loader:\n # Attributes are stored as a sparse CSR matrix\n attr_matrix = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),\n shape=loader['attr_shape']).todense()\n elif 'attr_matrix' in loader:\n # Attributes are stored as a (dense) np.ndarray\n attr_matrix = loader['attr_matrix']\n else:\n attr_matrix = None\n\n if 'labels_data' in loader:\n # Labels are stored as a CSR matrix\n 
labels = sp.csr_matrix((loader['labels_data'], loader['labels_indices'], loader['labels_indptr']),\n shape=loader['labels_shape']).todense()\n elif 'labels' in loader:\n # Labels are stored as a numpy array\n labels = loader['labels']\n else:\n labels = None\n g = DGLGraph()\n g.add_nodes(num_nodes)\n g.add_edges(adj_matrix.row, adj_matrix.col)\n g.add_edges(adj_matrix.col, adj_matrix.row)\n g.ndata['feat'] = attr_matrix\n g.ndata['label'] = labels\n return g \n\n def __getitem__(self, idx):\n assert idx == 0, \"This dataset has only one graph\"\n return self.data[0]\n\n def __len__(self):\n return len(self.data)\n\n\nclass CoraFull(GNNBenchmarkDataset):\n r\"\"\"\n Extended Cora dataset from `Deep Gaussian Embedding of Graphs: \n Unsupervised Inductive Learning via Ranking`. Nodes represent paper and edges represent citations.\n\n Reference: https://github.com/shchur/gnn-benchmark#datasets\n \"\"\"\n _url = {\"cora_full\":'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/cora_full.npz'}\n\n def __init__(self):\n super().__init__(\"cora_full\")\n\n\nclass Coauthor(GNNBenchmarkDataset):\n r\"\"\"\n Coauthor CS and Coauthor Physics are co-authorship graphs based on the Microsoft Academic Graph\n from the KDD Cup 2016 challenge 3\n . Here, nodes are authors, that are connected by an edge if they\n co-authored a paper; node features represent paper keywords for each author\u2019s papers, and class\n labels indicate most active fields of study for each author.\n\n Parameters\n ---------------\n name: str\n Name of the dataset, has to be 'cs' or 'physics'\n\n \"\"\"\n _url = {\n 'cs': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_cs.npz\",\n 'physics': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/ms_academic_phy.npz\"\n }\n\n\nclass AmazonCoBuy(GNNBenchmarkDataset):\n r\"\"\"\n Amazon Computers and Amazon Photo are segments of the Amazon co-purchase graph [McAuley\n et al., 2015], where nodes represent goods, edges indicate that two goods are frequently bought\n together, node features are bag-of-words encoded product reviews, and class labels are given by the\n product category.\n\n Reference: https://github.com/shchur/gnn-benchmark#datasets\n\n Parameters\n ---------------\n name: str\n Name of the dataset, has to be 'computers' or 'photo'\n\n \"\"\"\n _url = {\n 'computers': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_computers.npz\",\n 'photo': \"https://github.com/shchur/gnn-benchmark/raw/master/data/npz/amazon_electronics_photo.npz\"\n }\n", "path": "python/dgl/data/gnn_benckmark.py"}]}
| 1,825 | 217 |