problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-18.9k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 465-23.6k | num_tokens_prompt int64 556-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_1354
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-603
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Tiny enhancement request] Allow digit keys as shortkeys
Feature description
---------
Currently, only English letters are allowed as shortkeys for annotation.
Proposition: allow English letters and digits as shortkeys.
</issue>
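For context, a minimal illustrative sketch of the key set the proposition describes, built with Python's `string` module (the variable names are made up for the example):
```python
import string

# Current behaviour described in the issue: lowercase letters only.
letters_only = string.ascii_lowercase                    # 'abcdefghijklmnopqrstuvwxyz'

# Proposed behaviour: digits plus lowercase letters.
letters_and_digits = string.digits + string.ascii_lowercase
print(letters_and_digits)                                # '0123456789abcdefghijklmnopqrstuvwxyz'
```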
<code>
[start of app/api/models.py]
1 import string
2
3 from django.db import models
4 from django.dispatch import receiver
5 from django.db.models.signals import post_save, pre_delete
6 from django.urls import reverse
7 from django.conf import settings
8 from django.contrib.auth.models import User
9 from django.contrib.staticfiles.storage import staticfiles_storage
10 from django.core.exceptions import ValidationError
11 from polymorphic.models import PolymorphicModel
12
13 from .managers import AnnotationManager, Seq2seqAnnotationManager
14
15 DOCUMENT_CLASSIFICATION = 'DocumentClassification'
16 SEQUENCE_LABELING = 'SequenceLabeling'
17 SEQ2SEQ = 'Seq2seq'
18 PROJECT_CHOICES = (
19 (DOCUMENT_CLASSIFICATION, 'document classification'),
20 (SEQUENCE_LABELING, 'sequence labeling'),
21 (SEQ2SEQ, 'sequence to sequence'),
22 )
23
24
25 class Project(PolymorphicModel):
26 name = models.CharField(max_length=100)
27 description = models.TextField(default='')
28 guideline = models.TextField(default='')
29 created_at = models.DateTimeField(auto_now_add=True)
30 updated_at = models.DateTimeField(auto_now=True)
31 users = models.ManyToManyField(User, related_name='projects')
32 project_type = models.CharField(max_length=30, choices=PROJECT_CHOICES)
33 randomize_document_order = models.BooleanField(default=False)
34 collaborative_annotation = models.BooleanField(default=False)
35
36 def get_absolute_url(self):
37 return reverse('upload', args=[self.id])
38
39 @property
40 def image(self):
41 raise NotImplementedError()
42
43 def get_bundle_name(self):
44 raise NotImplementedError()
45
46 def get_bundle_name_upload(self):
47 raise NotImplementedError()
48
49 def get_bundle_name_download(self):
50 raise NotImplementedError()
51
52 def get_annotation_serializer(self):
53 raise NotImplementedError()
54
55 def get_annotation_class(self):
56 raise NotImplementedError()
57
58 def get_storage(self, data):
59 raise NotImplementedError()
60
61 def __str__(self):
62 return self.name
63
64
65 class TextClassificationProject(Project):
66
67 @property
68 def image(self):
69 return staticfiles_storage.url('assets/images/cats/text_classification.jpg')
70
71 def get_bundle_name(self):
72 return 'document_classification'
73
74 def get_bundle_name_upload(self):
75 return 'upload_text_classification'
76
77 def get_bundle_name_download(self):
78 return 'download_text_classification'
79
80 def get_annotation_serializer(self):
81 from .serializers import DocumentAnnotationSerializer
82 return DocumentAnnotationSerializer
83
84 def get_annotation_class(self):
85 return DocumentAnnotation
86
87 def get_storage(self, data):
88 from .utils import ClassificationStorage
89 return ClassificationStorage(data, self)
90
91
92 class SequenceLabelingProject(Project):
93
94 @property
95 def image(self):
96 return staticfiles_storage.url('assets/images/cats/sequence_labeling.jpg')
97
98 def get_bundle_name(self):
99 return 'sequence_labeling'
100
101 def get_bundle_name_upload(self):
102 return 'upload_sequence_labeling'
103
104 def get_bundle_name_download(self):
105 return 'download_sequence_labeling'
106
107 def get_annotation_serializer(self):
108 from .serializers import SequenceAnnotationSerializer
109 return SequenceAnnotationSerializer
110
111 def get_annotation_class(self):
112 return SequenceAnnotation
113
114 def get_storage(self, data):
115 from .utils import SequenceLabelingStorage
116 return SequenceLabelingStorage(data, self)
117
118
119 class Seq2seqProject(Project):
120
121 @property
122 def image(self):
123 return staticfiles_storage.url('assets/images/cats/seq2seq.jpg')
124
125 def get_bundle_name(self):
126 return 'seq2seq'
127
128 def get_bundle_name_upload(self):
129 return 'upload_seq2seq'
130
131 def get_bundle_name_download(self):
132 return 'download_seq2seq'
133
134 def get_annotation_serializer(self):
135 from .serializers import Seq2seqAnnotationSerializer
136 return Seq2seqAnnotationSerializer
137
138 def get_annotation_class(self):
139 return Seq2seqAnnotation
140
141 def get_storage(self, data):
142 from .utils import Seq2seqStorage
143 return Seq2seqStorage(data, self)
144
145
146 class Label(models.Model):
147 PREFIX_KEYS = (
148 ('ctrl', 'ctrl'),
149 ('shift', 'shift'),
150 ('ctrl shift', 'ctrl shift')
151 )
152 SUFFIX_KEYS = tuple(
153 (c, c) for c in string.ascii_lowercase
154 )
155
156 text = models.CharField(max_length=100)
157 prefix_key = models.CharField(max_length=10, blank=True, null=True, choices=PREFIX_KEYS)
158 suffix_key = models.CharField(max_length=1, blank=True, null=True, choices=SUFFIX_KEYS)
159 project = models.ForeignKey(Project, related_name='labels', on_delete=models.CASCADE)
160 background_color = models.CharField(max_length=7, default='#209cee')
161 text_color = models.CharField(max_length=7, default='#ffffff')
162 created_at = models.DateTimeField(auto_now_add=True)
163 updated_at = models.DateTimeField(auto_now=True)
164
165 def __str__(self):
166 return self.text
167
168 def clean(self):
169 # Don't allow shortcut key not to have a suffix key.
170 if self.prefix_key and not self.suffix_key:
171 raise ValidationError('Shortcut key may not have a suffix key.')
172
173 # each shortcut (prefix key + suffix key) can only be assigned to one label
174 if self.suffix_key or self.prefix_key:
175 other_labels = self.project.labels.exclude(id=self.id)
176 if other_labels.filter(suffix_key=self.suffix_key, prefix_key=self.prefix_key).exists():
177 raise ValidationError('A label with this shortcut already exists in the project')
178
179 super().clean()
180
181 class Meta:
182 unique_together = (
183 ('project', 'text'),
184 )
185
186
187 class Document(models.Model):
188 text = models.TextField()
189 project = models.ForeignKey(Project, related_name='documents', on_delete=models.CASCADE)
190 meta = models.TextField(default='{}')
191 created_at = models.DateTimeField(auto_now_add=True)
192 updated_at = models.DateTimeField(auto_now=True)
193 annotations_approved_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
194
195 def __str__(self):
196 return self.text[:50]
197
198
199 class Annotation(models.Model):
200 objects = AnnotationManager()
201
202 prob = models.FloatField(default=0.0)
203 manual = models.BooleanField(default=False)
204 user = models.ForeignKey(User, on_delete=models.CASCADE)
205 created_at = models.DateTimeField(auto_now_add=True)
206 updated_at = models.DateTimeField(auto_now=True)
207
208 class Meta:
209 abstract = True
210
211
212 class DocumentAnnotation(Annotation):
213 document = models.ForeignKey(Document, related_name='doc_annotations', on_delete=models.CASCADE)
214 label = models.ForeignKey(Label, on_delete=models.CASCADE)
215
216 class Meta:
217 unique_together = ('document', 'user', 'label')
218
219
220 class SequenceAnnotation(Annotation):
221 document = models.ForeignKey(Document, related_name='seq_annotations', on_delete=models.CASCADE)
222 label = models.ForeignKey(Label, on_delete=models.CASCADE)
223 start_offset = models.IntegerField()
224 end_offset = models.IntegerField()
225
226 def clean(self):
227 if self.start_offset >= self.end_offset:
228 raise ValidationError('start_offset is after end_offset')
229
230 class Meta:
231 unique_together = ('document', 'user', 'label', 'start_offset', 'end_offset')
232
233
234 class Seq2seqAnnotation(Annotation):
235 # Override AnnotationManager for custom functionality
236 objects = Seq2seqAnnotationManager()
237
238 document = models.ForeignKey(Document, related_name='seq2seq_annotations', on_delete=models.CASCADE)
239 text = models.CharField(max_length=500)
240
241 class Meta:
242 unique_together = ('document', 'user', 'text')
243
244
245 class Role(models.Model):
246 name = models.CharField(max_length=100, unique=True)
247 description = models.TextField(default='')
248 created_at = models.DateTimeField(auto_now_add=True)
249 updated_at = models.DateTimeField(auto_now=True)
250
251 def __str__(self):
252 return self.name
253
254
255 class RoleMapping(models.Model):
256 user = models.ForeignKey(User, related_name='role_mappings', on_delete=models.CASCADE)
257 project = models.ForeignKey(Project, related_name='role_mappings', on_delete=models.CASCADE)
258 role = models.ForeignKey(Role, on_delete=models.CASCADE)
259 created_at = models.DateTimeField(auto_now_add=True)
260 updated_at = models.DateTimeField(auto_now=True)
261
262 def clean(self):
263 other_rolemappings = self.project.role_mappings.exclude(id=self.id)
264
265 if other_rolemappings.filter(user=self.user, project=self.project).exists():
266 raise ValidationError('This user is already assigned to a role in this project.')
267
268 class Meta:
269 unique_together = ("user", "project", "role")
270
271
272 @receiver(post_save, sender=RoleMapping)
273 def add_linked_project(sender, instance, created, **kwargs):
274 if not created:
275 return
276 userInstance = instance.user
277 projectInstance = instance.project
278 if userInstance and projectInstance:
279 user = User.objects.get(pk=userInstance.pk)
280 project = Project.objects.get(pk=projectInstance.pk)
281 user.projects.add(project)
282 user.save()
283
284
285 @receiver(post_save)
286 def add_superusers_to_project(sender, instance, created, **kwargs):
287 if not created:
288 return
289 if sender not in Project.__subclasses__():
290 return
291 superusers = User.objects.filter(is_superuser=True)
292 admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
293 if superusers and admin_role:
294 RoleMapping.objects.bulk_create(
295 [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)
296 for superuser in superusers]
297 )
298
299
300 @receiver(post_save, sender=User)
301 def add_new_superuser_to_projects(sender, instance, created, **kwargs):
302 if created and instance.is_superuser:
303 admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
304 projects = Project.objects.all()
305 if admin_role and projects:
306 RoleMapping.objects.bulk_create(
307 [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)
308 for project in projects]
309 )
310
311
312 @receiver(pre_delete, sender=RoleMapping)
313 def delete_linked_project(sender, instance, using, **kwargs):
314 userInstance = instance.user
315 projectInstance = instance.project
316 if userInstance and projectInstance:
317 user = User.objects.get(pk=userInstance.pk)
318 project = Project.objects.get(pk=projectInstance.pk)
319 user.projects.remove(project)
320 user.save()
321
[end of app/api/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/models.py b/app/api/models.py
--- a/app/api/models.py
+++ b/app/api/models.py
@@ -150,7 +150,7 @@
('ctrl shift', 'ctrl shift')
)
SUFFIX_KEYS = tuple(
- (c, c) for c in string.ascii_lowercase
+ (c, c) for c in string.digits + string.ascii_lowercase
)
text = models.CharField(max_length=100)
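As a quick sanity check on the change above, a small standalone sketch of how the widened `SUFFIX_KEYS` choices would behave (the assertions are illustrative, not taken from the project's test suite):
```python
import string

# Mirrors the patched definition: one (value, label) pair per allowed suffix key.
SUFFIX_KEYS = tuple((c, c) for c in string.digits + string.ascii_lowercase)

assert ('7', '7') in SUFFIX_KEYS   # digits are now accepted as shortkey suffixes
assert ('a', 'a') in SUFFIX_KEYS   # letters keep working as before
assert len(SUFFIX_KEYS) == 36      # 10 digits + 26 letters
```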
|
{"golden_diff": "diff --git a/app/api/models.py b/app/api/models.py\n--- a/app/api/models.py\n+++ b/app/api/models.py\n@@ -150,7 +150,7 @@\n ('ctrl shift', 'ctrl shift')\n )\n SUFFIX_KEYS = tuple(\n- (c, c) for c in string.ascii_lowercase\n+ (c, c) for c in string.digits + string.ascii_lowercase\n )\n \n text = models.CharField(max_length=100)\n", "issue": "[Tiny enhancement request] Allow digit keys as shortkeys\nFeature description\r\n---------\r\nEnglish letters are allowed as shortkeys for annotation now, only.\r\n\r\nProposition: allow English letters and digits as shortkeys.\n", "before_files": [{"content": "import string\n\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.core.exceptions import ValidationError\nfrom polymorphic.models import PolymorphicModel\n\nfrom .managers import AnnotationManager, Seq2seqAnnotationManager\n\nDOCUMENT_CLASSIFICATION = 'DocumentClassification'\nSEQUENCE_LABELING = 'SequenceLabeling'\nSEQ2SEQ = 'Seq2seq'\nPROJECT_CHOICES = (\n (DOCUMENT_CLASSIFICATION, 'document classification'),\n (SEQUENCE_LABELING, 'sequence labeling'),\n (SEQ2SEQ, 'sequence to sequence'),\n)\n\n\nclass Project(PolymorphicModel):\n name = models.CharField(max_length=100)\n description = models.TextField(default='')\n guideline = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n users = models.ManyToManyField(User, related_name='projects')\n project_type = models.CharField(max_length=30, choices=PROJECT_CHOICES)\n randomize_document_order = models.BooleanField(default=False)\n collaborative_annotation = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n return reverse('upload', args=[self.id])\n\n @property\n def image(self):\n raise NotImplementedError()\n\n def get_bundle_name(self):\n raise NotImplementedError()\n\n def get_bundle_name_upload(self):\n raise NotImplementedError()\n\n def get_bundle_name_download(self):\n raise NotImplementedError()\n\n def get_annotation_serializer(self):\n raise NotImplementedError()\n\n def get_annotation_class(self):\n raise NotImplementedError()\n\n def get_storage(self, data):\n raise NotImplementedError()\n\n def __str__(self):\n return self.name\n\n\nclass TextClassificationProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/text_classification.jpg')\n\n def get_bundle_name(self):\n return 'document_classification'\n\n def get_bundle_name_upload(self):\n return 'upload_text_classification'\n\n def get_bundle_name_download(self):\n return 'download_text_classification'\n\n def get_annotation_serializer(self):\n from .serializers import DocumentAnnotationSerializer\n return DocumentAnnotationSerializer\n\n def get_annotation_class(self):\n return DocumentAnnotation\n\n def get_storage(self, data):\n from .utils import ClassificationStorage\n return ClassificationStorage(data, self)\n\n\nclass SequenceLabelingProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/sequence_labeling.jpg')\n\n def get_bundle_name(self):\n return 'sequence_labeling'\n\n def get_bundle_name_upload(self):\n return 'upload_sequence_labeling'\n\n def get_bundle_name_download(self):\n return 
'download_sequence_labeling'\n\n def get_annotation_serializer(self):\n from .serializers import SequenceAnnotationSerializer\n return SequenceAnnotationSerializer\n\n def get_annotation_class(self):\n return SequenceAnnotation\n\n def get_storage(self, data):\n from .utils import SequenceLabelingStorage\n return SequenceLabelingStorage(data, self)\n\n\nclass Seq2seqProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/seq2seq.jpg')\n\n def get_bundle_name(self):\n return 'seq2seq'\n\n def get_bundle_name_upload(self):\n return 'upload_seq2seq'\n\n def get_bundle_name_download(self):\n return 'download_seq2seq'\n\n def get_annotation_serializer(self):\n from .serializers import Seq2seqAnnotationSerializer\n return Seq2seqAnnotationSerializer\n\n def get_annotation_class(self):\n return Seq2seqAnnotation\n\n def get_storage(self, data):\n from .utils import Seq2seqStorage\n return Seq2seqStorage(data, self)\n\n\nclass Label(models.Model):\n PREFIX_KEYS = (\n ('ctrl', 'ctrl'),\n ('shift', 'shift'),\n ('ctrl shift', 'ctrl shift')\n )\n SUFFIX_KEYS = tuple(\n (c, c) for c in string.ascii_lowercase\n )\n\n text = models.CharField(max_length=100)\n prefix_key = models.CharField(max_length=10, blank=True, null=True, choices=PREFIX_KEYS)\n suffix_key = models.CharField(max_length=1, blank=True, null=True, choices=SUFFIX_KEYS)\n project = models.ForeignKey(Project, related_name='labels', on_delete=models.CASCADE)\n background_color = models.CharField(max_length=7, default='#209cee')\n text_color = models.CharField(max_length=7, default='#ffffff')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.text\n\n def clean(self):\n # Don't allow shortcut key not to have a suffix key.\n if self.prefix_key and not self.suffix_key:\n raise ValidationError('Shortcut key may not have a suffix key.')\n\n # each shortcut (prefix key + suffix key) can only be assigned to one label\n if self.suffix_key or self.prefix_key:\n other_labels = self.project.labels.exclude(id=self.id)\n if other_labels.filter(suffix_key=self.suffix_key, prefix_key=self.prefix_key).exists():\n raise ValidationError('A label with this shortcut already exists in the project')\n\n super().clean()\n\n class Meta:\n unique_together = (\n ('project', 'text'),\n )\n\n\nclass Document(models.Model):\n text = models.TextField()\n project = models.ForeignKey(Project, related_name='documents', on_delete=models.CASCADE)\n meta = models.TextField(default='{}')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n annotations_approved_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n\n def __str__(self):\n return self.text[:50]\n\n\nclass Annotation(models.Model):\n objects = AnnotationManager()\n\n prob = models.FloatField(default=0.0)\n manual = models.BooleanField(default=False)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DocumentAnnotation(Annotation):\n document = models.ForeignKey(Document, related_name='doc_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('document', 'user', 'label')\n\n\nclass SequenceAnnotation(Annotation):\n document = models.ForeignKey(Document, 
related_name='seq_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n start_offset = models.IntegerField()\n end_offset = models.IntegerField()\n\n def clean(self):\n if self.start_offset >= self.end_offset:\n raise ValidationError('start_offset is after end_offset')\n\n class Meta:\n unique_together = ('document', 'user', 'label', 'start_offset', 'end_offset')\n\n\nclass Seq2seqAnnotation(Annotation):\n # Override AnnotationManager for custom functionality\n objects = Seq2seqAnnotationManager()\n\n document = models.ForeignKey(Document, related_name='seq2seq_annotations', on_delete=models.CASCADE)\n text = models.CharField(max_length=500)\n\n class Meta:\n unique_together = ('document', 'user', 'text')\n\n\nclass Role(models.Model):\n name = models.CharField(max_length=100, unique=True)\n description = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass RoleMapping(models.Model):\n user = models.ForeignKey(User, related_name='role_mappings', on_delete=models.CASCADE)\n project = models.ForeignKey(Project, related_name='role_mappings', on_delete=models.CASCADE)\n role = models.ForeignKey(Role, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def clean(self):\n other_rolemappings = self.project.role_mappings.exclude(id=self.id)\n\n if other_rolemappings.filter(user=self.user, project=self.project).exists():\n raise ValidationError('This user is already assigned to a role in this project.')\n\n class Meta:\n unique_together = (\"user\", \"project\", \"role\")\n\n\n@receiver(post_save, sender=RoleMapping)\ndef add_linked_project(sender, instance, created, **kwargs):\n if not created:\n return\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.add(project)\n user.save()\n\n\n@receiver(post_save)\ndef add_superusers_to_project(sender, instance, created, **kwargs):\n if not created:\n return\n if sender not in Project.__subclasses__():\n return\n superusers = User.objects.filter(is_superuser=True)\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n if superusers and admin_role:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)\n for superuser in superusers]\n )\n\n\n@receiver(post_save, sender=User)\ndef add_new_superuser_to_projects(sender, instance, created, **kwargs):\n if created and instance.is_superuser:\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n projects = Project.objects.all()\n if admin_role and projects:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)\n for project in projects]\n )\n\n\n@receiver(pre_delete, sender=RoleMapping)\ndef delete_linked_project(sender, instance, using, **kwargs):\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.remove(project)\n user.save()\n", "path": "app/api/models.py"}]}
| 3,626 | 107 |
gh_patches_debug_42719
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-993
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Question about using model bundles.
Hi,
thank you for sharing the trained bundles.
However, when I tried the ISPRS Potsdam segmentation bundle, I cannot get the prediction.
The command is here and you can see the error messages.
The bundle is downloaded from your example homepage.
```
rastervision predict $buddle $image_uri $label_uri
```
ERROR:
```
FileNotFoundError: [Errno 2] No such file or directory: '/opt/data/tmp/tmp6ut8_2hy/tmpa7bi6qk2'
```
and
```
rastervision.pipeline.file_system.file_system.NotReadableError: Could not read s3://raster-vision-raw-data/isprs-potsdam/4_Ortho_RGBIR/top_potsdam_2_10_RGBIR.tif
```
I just use a local bundle and image. Can you tell me what is wrong with it?
The same command shows no problem when using other bundles.
Thanks.
</issue>
<code>
[start of rastervision_core/rastervision/core/data/scene_config.py]
1 from typing import Optional, List
2
3 from shapely.geometry import shape
4
5 from rastervision.pipeline.config import Config, register_config, Field
6 from rastervision.core.data.raster_source import RasterSourceConfig
7 from rastervision.core.data.label_source import LabelSourceConfig
8 from rastervision.core.data.label_store import LabelStoreConfig
9 from rastervision.core.data.scene import Scene
10 from rastervision.core.data.vector_source import GeoJSONVectorSourceConfig
11
12
13 @register_config('scene')
14 class SceneConfig(Config):
15 """Config for a Scene which comprises the raster data and labels for an AOI."""
16 id: str
17 raster_source: RasterSourceConfig
18 label_source: LabelSourceConfig
19 label_store: Optional[LabelStoreConfig] = None
20 aoi_uris: Optional[List[str]] = Field(
21 None,
22 description=
23 ('List of URIs of GeoJSON files that define the AOIs for the scene. Each polygon'
24 'defines an AOI which is a piece of the scene that is assumed to be fully '
25 'labeled and usable for training or validation.'))
26
27 def build(self, class_config, tmp_dir, use_transformers=True):
28 raster_source = self.raster_source.build(
29 tmp_dir, use_transformers=use_transformers)
30 crs_transformer = raster_source.get_crs_transformer()
31 extent = raster_source.get_extent()
32
33 label_source = (self.label_source.build(class_config, crs_transformer,
34 extent, tmp_dir)
35 if self.label_source is not None else None)
36 label_store = (self.label_store.build(class_config, crs_transformer,
37 extent, tmp_dir)
38 if self.label_store is not None else None)
39
40 aoi_polygons = None
41 if self.aoi_uris is not None:
42 aoi_polygons = []
43 for uri in self.aoi_uris:
44 # Set default class id to 0 to avoid deleting features. If it was
45 # set to None, they would all be deleted.
46 aoi_geojson = GeoJSONVectorSourceConfig(
47 uri=uri, default_class_id=0, ignore_crs_field=True).build(
48 class_config, crs_transformer).get_geojson()
49 for f in aoi_geojson['features']:
50 aoi_polygons.append(shape(f['geometry']))
51
52 return Scene(
53 self.id,
54 raster_source,
55 ground_truth_label_source=label_source,
56 prediction_label_store=label_store,
57 aoi_polygons=aoi_polygons)
58
59 def update(self, pipeline=None):
60 super().update()
61
62 self.raster_source.update(pipeline=pipeline, scene=self)
63 self.label_source.update(pipeline=pipeline, scene=self)
64 if self.label_store is None and pipeline is not None:
65 self.label_store = pipeline.get_default_label_store(scene=self)
66 if self.label_store is not None:
67 self.label_store.update(pipeline=pipeline, scene=self)
68
[end of rastervision_core/rastervision/core/data/scene_config.py]
[start of rastervision_core/rastervision/core/predictor.py]
1 from os.path import join
2 import zipfile
3 import logging
4
5 from rastervision.pipeline import rv_config
6 from rastervision.pipeline.config import (build_config, upgrade_config)
7 from rastervision.pipeline.file_system.utils import (download_if_needed,
8 make_dir, file_to_json)
9 from rastervision.core.data.raster_source import ChannelOrderError
10 from rastervision.core.analyzer import StatsAnalyzerConfig
11
12 log = logging.getLogger(__name__)
13
14
15 class Predictor():
16 """Class for making predictions based off of a model bundle."""
17
18 def __init__(self,
19 model_bundle_uri,
20 tmp_dir,
21 update_stats=False,
22 channel_order=None):
23 """Creates a new Predictor.
24
25 Args:
26 model_bundle_uri: URI of the model bundle to use. Can be any
27 type of URI that Raster Vision can read.
28 tmp_dir: Temporary directory in which to store files that are used
29 by the Predictor. This directory is not cleaned up by this
30 class.
31 channel_order: Option for a new channel order to use for the
32 imagery being predicted against. If not present, the
33 channel_order from the original configuration in the predict
34 package will be used.
35 """
36 self.tmp_dir = tmp_dir
37 self.update_stats = update_stats
38 self.model_loaded = False
39
40 bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
41 bundle_dir = join(tmp_dir, 'bundle')
42 make_dir(bundle_dir)
43 with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:
44 bundle_zip.extractall(path=bundle_dir)
45
46 config_path = join(bundle_dir, 'pipeline-config.json')
47 config_dict = file_to_json(config_path)
48 rv_config.set_everett_config(
49 config_overrides=config_dict.get('rv_config'))
50 config_dict = upgrade_config(config_dict)
51
52 self.pipeline = build_config(config_dict).build(tmp_dir)
53 self.scene = None
54
55 if not hasattr(self.pipeline, 'predict'):
56 raise Exception(
57 'pipeline in model bundle must have predict method')
58
59 self.scene = self.pipeline.config.dataset.validation_scenes[0]
60
61 if not hasattr(self.scene.raster_source, 'uris'):
62 raise Exception(
63 'raster_source in model bundle must have uris as field')
64
65 if not hasattr(self.scene.label_store, 'uri'):
66 raise Exception(
67 'label_store in model bundle must have uri as field')
68
69 for t in self.scene.raster_source.transformers:
70 t.update_root(bundle_dir)
71
72 if self.update_stats:
73 stats_analyzer = StatsAnalyzerConfig(
74 output_uri=join(bundle_dir, 'stats.json'))
75 self.pipeline.config.analyzers = [stats_analyzer]
76
77 self.scene.label_source = None
78 self.scene.aoi_uris = None
79 self.pipeline.config.dataset.train_scenes = [self.scene]
80 self.pipeline.config.dataset.validation_scenes = [self.scene]
81 self.pipeline.config.dataset.test_scenes = None
82 self.pipeline.config.train_uri = bundle_dir
83
84 if channel_order is not None:
85 self.scene.raster_source.channel_order = channel_order
86
87 def predict(self, image_uris, label_uri, vector_label_uri=None):
88 """Generate predictions for the given image.
89
90 Args:
91 image_uris: URIs of the images to make predictions against.
92 This can be any type of URI readable by Raster Vision
93 FileSystems.
94 label_uri: URI to save labels off into
95 vector_label_uri: URI to save vectorized labels for semantic segmentation
96 model bundles that support it
97 """
98 try:
99 self.scene.raster_source.uris = image_uris
100 self.scene.label_store.uri = label_uri
101 if (hasattr(self.scene.label_store, 'vector_output')
102 and self.scene.label_store.vector_output):
103 if vector_label_uri:
104 for vo in self.scene.label_store.vector_output:
105 vo.uri = join(
106 vector_label_uri, '{}-{}.json'.format(
107 vo.class_id, vo.get_mode()))
108 else:
109 self.scene.label_store.vector_output = []
110 elif vector_label_uri:
111 log.warn(
112 'vector_label_uri was supplied but this model bundle does not '
113 'generate vector labels.')
114
115 if self.update_stats:
116 self.pipeline.analyze()
117 self.pipeline.predict()
118 except ChannelOrderError:
119 raise ValueError(
120 'The predict package is using a channel_order '
121 'with channels unavailable in the imagery.\nTo set a new '
122 'channel_order that only uses channels available in the '
123 'imagery, use the --channel-order option.')
124
[end of rastervision_core/rastervision/core/predictor.py]
[start of rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py]
1 from typing import Optional
2 from os.path import join, basename
3
4 from rastervision.pipeline.config import register_config, Field
5 from rastervision.core.data.raster_transformer.raster_transformer_config import ( # noqa
6 RasterTransformerConfig)
7 from rastervision.core.data.raster_transformer.stats_transformer import ( # noqa
8 StatsTransformer)
9 from rastervision.core.raster_stats import RasterStats
10
11
12 @register_config('stats_transformer')
13 class StatsTransformerConfig(RasterTransformerConfig):
14 stats_uri: Optional[str] = Field(
15 None,
16 description=
17 ('The URI of the output of the StatsAnalyzer. If None, and this Config is '
18 'inside an RVPipeline, then this field will be auto-generated.'))
19
20 def update(self, pipeline=None, scene=None):
21 if pipeline is not None:
22 self.stats_uri = join(pipeline.analyze_uri, 'stats.json')
23
24 def build(self):
25 return StatsTransformer(RasterStats.load(self.stats_uri))
26
27 def update_root(self, root_dir):
28 self.stats_uri = join(root_dir, basename(self.stats_uri))
29
[end of rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py b/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py
--- a/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py
+++ b/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py
@@ -18,7 +18,7 @@
'inside an RVPipeline, then this field will be auto-generated.'))
def update(self, pipeline=None, scene=None):
- if pipeline is not None:
+ if pipeline is not None and self.stats_uri is None:
self.stats_uri = join(pipeline.analyze_uri, 'stats.json')
def build(self):
diff --git a/rastervision_core/rastervision/core/data/scene_config.py b/rastervision_core/rastervision/core/data/scene_config.py
--- a/rastervision_core/rastervision/core/data/scene_config.py
+++ b/rastervision_core/rastervision/core/data/scene_config.py
@@ -60,7 +60,8 @@
super().update()
self.raster_source.update(pipeline=pipeline, scene=self)
- self.label_source.update(pipeline=pipeline, scene=self)
+ if self.label_source is not None:
+ self.label_source.update(pipeline=pipeline, scene=self)
if self.label_store is None and pipeline is not None:
self.label_store = pipeline.get_default_label_store(scene=self)
if self.label_store is not None:
diff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py
--- a/rastervision_core/rastervision/core/predictor.py
+++ b/rastervision_core/rastervision/core/predictor.py
@@ -48,15 +48,8 @@
rv_config.set_everett_config(
config_overrides=config_dict.get('rv_config'))
config_dict = upgrade_config(config_dict)
-
- self.pipeline = build_config(config_dict).build(tmp_dir)
- self.scene = None
-
- if not hasattr(self.pipeline, 'predict'):
- raise Exception(
- 'pipeline in model bundle must have predict method')
-
- self.scene = self.pipeline.config.dataset.validation_scenes[0]
+ self.config = build_config(config_dict)
+ self.scene = self.config.dataset.validation_scenes[0]
if not hasattr(self.scene.raster_source, 'uris'):
raise Exception(
@@ -72,18 +65,20 @@
if self.update_stats:
stats_analyzer = StatsAnalyzerConfig(
output_uri=join(bundle_dir, 'stats.json'))
- self.pipeline.config.analyzers = [stats_analyzer]
+ self.config.analyzers = [stats_analyzer]
self.scene.label_source = None
self.scene.aoi_uris = None
- self.pipeline.config.dataset.train_scenes = [self.scene]
- self.pipeline.config.dataset.validation_scenes = [self.scene]
- self.pipeline.config.dataset.test_scenes = None
- self.pipeline.config.train_uri = bundle_dir
+ self.config.dataset.train_scenes = [self.scene]
+ self.config.dataset.validation_scenes = [self.scene]
+ self.config.dataset.test_scenes = []
+ self.config.train_uri = bundle_dir
if channel_order is not None:
self.scene.raster_source.channel_order = channel_order
+ self.pipeline = None
+
def predict(self, image_uris, label_uri, vector_label_uri=None):
"""Generate predictions for the given image.
@@ -95,6 +90,13 @@
vector_label_uri: URI to save vectorized labels for semantic segmentation
model bundles that support it
"""
+ if self.pipeline is None:
+ self.scene.raster_source.uris = image_uris
+ self.pipeline = self.config.build(self.tmp_dir)
+ if not hasattr(self.pipeline, 'predict'):
+ raise Exception(
+ 'pipeline in model bundle must have predict method')
+
try:
self.scene.raster_source.uris = image_uris
self.scene.label_store.uri = label_uri
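The central design choice in this diff is to defer building the pipeline until the caller has supplied its own image URIs, so the URIs baked into the bundle are never opened. A generic sketch of that lazy-build pattern, assuming hypothetical `config`/`pipeline` objects rather than the actual Raster Vision classes:
```python
class LazyPredictor:
    """Sketch of a predictor that parses its config eagerly but builds the
    heavy pipeline only on first use (hypothetical classes, not the real API)."""

    def __init__(self, config, tmp_dir):
        self.config = config      # parsed bundle config; nothing is opened yet
        self.tmp_dir = tmp_dir
        self.pipeline = None      # deferred until predict() is called

    def predict(self, image_uris, label_uri):
        scene = self.config.dataset.validation_scenes[0]
        # Point the scene at the caller's imagery *before* building the pipeline,
        # so the URIs stored inside the bundle are never read.
        scene.raster_source.uris = image_uris
        scene.label_store.uri = label_uri
        if self.pipeline is None:
            self.pipeline = self.config.build(self.tmp_dir)
        return self.pipeline.predict()
```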
|
{"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py b/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py\n--- a/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py\n+++ b/rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py\n@@ -18,7 +18,7 @@\n 'inside an RVPipeline, then this field will be auto-generated.'))\n \n def update(self, pipeline=None, scene=None):\n- if pipeline is not None:\n+ if pipeline is not None and self.stats_uri is None:\n self.stats_uri = join(pipeline.analyze_uri, 'stats.json')\n \n def build(self):\ndiff --git a/rastervision_core/rastervision/core/data/scene_config.py b/rastervision_core/rastervision/core/data/scene_config.py\n--- a/rastervision_core/rastervision/core/data/scene_config.py\n+++ b/rastervision_core/rastervision/core/data/scene_config.py\n@@ -60,7 +60,8 @@\n super().update()\n \n self.raster_source.update(pipeline=pipeline, scene=self)\n- self.label_source.update(pipeline=pipeline, scene=self)\n+ if self.label_source is not None:\n+ self.label_source.update(pipeline=pipeline, scene=self)\n if self.label_store is None and pipeline is not None:\n self.label_store = pipeline.get_default_label_store(scene=self)\n if self.label_store is not None:\ndiff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py\n--- a/rastervision_core/rastervision/core/predictor.py\n+++ b/rastervision_core/rastervision/core/predictor.py\n@@ -48,15 +48,8 @@\n rv_config.set_everett_config(\n config_overrides=config_dict.get('rv_config'))\n config_dict = upgrade_config(config_dict)\n-\n- self.pipeline = build_config(config_dict).build(tmp_dir)\n- self.scene = None\n-\n- if not hasattr(self.pipeline, 'predict'):\n- raise Exception(\n- 'pipeline in model bundle must have predict method')\n-\n- self.scene = self.pipeline.config.dataset.validation_scenes[0]\n+ self.config = build_config(config_dict)\n+ self.scene = self.config.dataset.validation_scenes[0]\n \n if not hasattr(self.scene.raster_source, 'uris'):\n raise Exception(\n@@ -72,18 +65,20 @@\n if self.update_stats:\n stats_analyzer = StatsAnalyzerConfig(\n output_uri=join(bundle_dir, 'stats.json'))\n- self.pipeline.config.analyzers = [stats_analyzer]\n+ self.config.analyzers = [stats_analyzer]\n \n self.scene.label_source = None\n self.scene.aoi_uris = None\n- self.pipeline.config.dataset.train_scenes = [self.scene]\n- self.pipeline.config.dataset.validation_scenes = [self.scene]\n- self.pipeline.config.dataset.test_scenes = None\n- self.pipeline.config.train_uri = bundle_dir\n+ self.config.dataset.train_scenes = [self.scene]\n+ self.config.dataset.validation_scenes = [self.scene]\n+ self.config.dataset.test_scenes = []\n+ self.config.train_uri = bundle_dir\n \n if channel_order is not None:\n self.scene.raster_source.channel_order = channel_order\n \n+ self.pipeline = None\n+\n def predict(self, image_uris, label_uri, vector_label_uri=None):\n \"\"\"Generate predictions for the given image.\n \n@@ -95,6 +90,13 @@\n vector_label_uri: URI to save vectorized labels for semantic segmentation\n model bundles that support it\n \"\"\"\n+ if self.pipeline is None:\n+ self.scene.raster_source.uris = image_uris\n+ self.pipeline = self.config.build(self.tmp_dir)\n+ if not hasattr(self.pipeline, 'predict'):\n+ raise Exception(\n+ 'pipeline in model bundle must have predict method')\n+\n try:\n 
self.scene.raster_source.uris = image_uris\n self.scene.label_store.uri = label_uri\n", "issue": "Question about using model buddles. \nHi, \r\nthank you for sharing the trained buddles.\r\nHowever, when I tried ISPRS Potsdam segementation buddle, I can not get the prediction.\r\nThe command is here and you can see the error message.\r\nThe buddle is downloaded form your example homepage. \r\n```\r\nrastervision predict $buddle $image_uri $label_uri\r\n```\r\nERROR:\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: '/opt/data/tmp/tmp6ut8_2hy/tmpa7bi6qk2'\r\n```\r\nand \r\n```\r\nrastervision.pipeline.file_system.file_system.NotReadableError: Could not read s3://raster-vision-raw-data/isprs-potsdam/4_Ortho_RGBIR/top_potsdam_2_10_RGBIR.tif\r\n```\r\nI just use local buddle and image. Can you tell me what is wrong with it?\r\nThe same command shows no problem when using other buddles.\r\n\r\nThanks.\n", "before_files": [{"content": "from typing import Optional, List\n\nfrom shapely.geometry import shape\n\nfrom rastervision.pipeline.config import Config, register_config, Field\nfrom rastervision.core.data.raster_source import RasterSourceConfig\nfrom rastervision.core.data.label_source import LabelSourceConfig\nfrom rastervision.core.data.label_store import LabelStoreConfig\nfrom rastervision.core.data.scene import Scene\nfrom rastervision.core.data.vector_source import GeoJSONVectorSourceConfig\n\n\n@register_config('scene')\nclass SceneConfig(Config):\n \"\"\"Config for a Scene which comprises the raster data and labels for an AOI.\"\"\"\n id: str\n raster_source: RasterSourceConfig\n label_source: LabelSourceConfig\n label_store: Optional[LabelStoreConfig] = None\n aoi_uris: Optional[List[str]] = Field(\n None,\n description=\n ('List of URIs of GeoJSON files that define the AOIs for the scene. Each polygon'\n 'defines an AOI which is a piece of the scene that is assumed to be fully '\n 'labeled and usable for training or validation.'))\n\n def build(self, class_config, tmp_dir, use_transformers=True):\n raster_source = self.raster_source.build(\n tmp_dir, use_transformers=use_transformers)\n crs_transformer = raster_source.get_crs_transformer()\n extent = raster_source.get_extent()\n\n label_source = (self.label_source.build(class_config, crs_transformer,\n extent, tmp_dir)\n if self.label_source is not None else None)\n label_store = (self.label_store.build(class_config, crs_transformer,\n extent, tmp_dir)\n if self.label_store is not None else None)\n\n aoi_polygons = None\n if self.aoi_uris is not None:\n aoi_polygons = []\n for uri in self.aoi_uris:\n # Set default class id to 0 to avoid deleting features. 
If it was\n # set to None, they would all be deleted.\n aoi_geojson = GeoJSONVectorSourceConfig(\n uri=uri, default_class_id=0, ignore_crs_field=True).build(\n class_config, crs_transformer).get_geojson()\n for f in aoi_geojson['features']:\n aoi_polygons.append(shape(f['geometry']))\n\n return Scene(\n self.id,\n raster_source,\n ground_truth_label_source=label_source,\n prediction_label_store=label_store,\n aoi_polygons=aoi_polygons)\n\n def update(self, pipeline=None):\n super().update()\n\n self.raster_source.update(pipeline=pipeline, scene=self)\n self.label_source.update(pipeline=pipeline, scene=self)\n if self.label_store is None and pipeline is not None:\n self.label_store = pipeline.get_default_label_store(scene=self)\n if self.label_store is not None:\n self.label_store.update(pipeline=pipeline, scene=self)\n", "path": "rastervision_core/rastervision/core/data/scene_config.py"}, {"content": "from os.path import join\nimport zipfile\nimport logging\n\nfrom rastervision.pipeline import rv_config\nfrom rastervision.pipeline.config import (build_config, upgrade_config)\nfrom rastervision.pipeline.file_system.utils import (download_if_needed,\n make_dir, file_to_json)\nfrom rastervision.core.data.raster_source import ChannelOrderError\nfrom rastervision.core.analyzer import StatsAnalyzerConfig\n\nlog = logging.getLogger(__name__)\n\n\nclass Predictor():\n \"\"\"Class for making predictions based off of a model bundle.\"\"\"\n\n def __init__(self,\n model_bundle_uri,\n tmp_dir,\n update_stats=False,\n channel_order=None):\n \"\"\"Creates a new Predictor.\n\n Args:\n model_bundle_uri: URI of the model bundle to use. Can be any\n type of URI that Raster Vision can read.\n tmp_dir: Temporary directory in which to store files that are used\n by the Predictor. This directory is not cleaned up by this\n class.\n channel_order: Option for a new channel order to use for the\n imagery being predicted against. 
If not present, the\n channel_order from the original configuration in the predict\n package will be used.\n \"\"\"\n self.tmp_dir = tmp_dir\n self.update_stats = update_stats\n self.model_loaded = False\n\n bundle_path = download_if_needed(model_bundle_uri, tmp_dir)\n bundle_dir = join(tmp_dir, 'bundle')\n make_dir(bundle_dir)\n with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:\n bundle_zip.extractall(path=bundle_dir)\n\n config_path = join(bundle_dir, 'pipeline-config.json')\n config_dict = file_to_json(config_path)\n rv_config.set_everett_config(\n config_overrides=config_dict.get('rv_config'))\n config_dict = upgrade_config(config_dict)\n\n self.pipeline = build_config(config_dict).build(tmp_dir)\n self.scene = None\n\n if not hasattr(self.pipeline, 'predict'):\n raise Exception(\n 'pipeline in model bundle must have predict method')\n\n self.scene = self.pipeline.config.dataset.validation_scenes[0]\n\n if not hasattr(self.scene.raster_source, 'uris'):\n raise Exception(\n 'raster_source in model bundle must have uris as field')\n\n if not hasattr(self.scene.label_store, 'uri'):\n raise Exception(\n 'label_store in model bundle must have uri as field')\n\n for t in self.scene.raster_source.transformers:\n t.update_root(bundle_dir)\n\n if self.update_stats:\n stats_analyzer = StatsAnalyzerConfig(\n output_uri=join(bundle_dir, 'stats.json'))\n self.pipeline.config.analyzers = [stats_analyzer]\n\n self.scene.label_source = None\n self.scene.aoi_uris = None\n self.pipeline.config.dataset.train_scenes = [self.scene]\n self.pipeline.config.dataset.validation_scenes = [self.scene]\n self.pipeline.config.dataset.test_scenes = None\n self.pipeline.config.train_uri = bundle_dir\n\n if channel_order is not None:\n self.scene.raster_source.channel_order = channel_order\n\n def predict(self, image_uris, label_uri, vector_label_uri=None):\n \"\"\"Generate predictions for the given image.\n\n Args:\n image_uris: URIs of the images to make predictions against.\n This can be any type of URI readable by Raster Vision\n FileSystems.\n label_uri: URI to save labels off into\n vector_label_uri: URI to save vectorized labels for semantic segmentation\n model bundles that support it\n \"\"\"\n try:\n self.scene.raster_source.uris = image_uris\n self.scene.label_store.uri = label_uri\n if (hasattr(self.scene.label_store, 'vector_output')\n and self.scene.label_store.vector_output):\n if vector_label_uri:\n for vo in self.scene.label_store.vector_output:\n vo.uri = join(\n vector_label_uri, '{}-{}.json'.format(\n vo.class_id, vo.get_mode()))\n else:\n self.scene.label_store.vector_output = []\n elif vector_label_uri:\n log.warn(\n 'vector_label_uri was supplied but this model bundle does not '\n 'generate vector labels.')\n\n if self.update_stats:\n self.pipeline.analyze()\n self.pipeline.predict()\n except ChannelOrderError:\n raise ValueError(\n 'The predict package is using a channel_order '\n 'with channels unavailable in the imagery.\\nTo set a new '\n 'channel_order that only uses channels available in the '\n 'imagery, use the --channel-order option.')\n", "path": "rastervision_core/rastervision/core/predictor.py"}, {"content": "from typing import Optional\nfrom os.path import join, basename\n\nfrom rastervision.pipeline.config import register_config, Field\nfrom rastervision.core.data.raster_transformer.raster_transformer_config import ( # noqa\n RasterTransformerConfig)\nfrom rastervision.core.data.raster_transformer.stats_transformer import ( # noqa\n StatsTransformer)\nfrom 
rastervision.core.raster_stats import RasterStats\n\n\n@register_config('stats_transformer')\nclass StatsTransformerConfig(RasterTransformerConfig):\n stats_uri: Optional[str] = Field(\n None,\n description=\n ('The URI of the output of the StatsAnalyzer. If None, and this Config is '\n 'inside an RVPipeline, then this field will be auto-generated.'))\n\n def update(self, pipeline=None, scene=None):\n if pipeline is not None:\n self.stats_uri = join(pipeline.analyze_uri, 'stats.json')\n\n def build(self):\n return StatsTransformer(RasterStats.load(self.stats_uri))\n\n def update_root(self, root_dir):\n self.stats_uri = join(root_dir, basename(self.stats_uri))\n", "path": "rastervision_core/rastervision/core/data/raster_transformer/stats_transformer_config.py"}]}
| 3,149 | 940 |
gh_patches_debug_37897
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3297
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Subscription checking too hard
The subscription check for certain plugins will fail as soon as it is exceeded by only one token.
If a plugin with 5000 subscriptions is hit by a system with 20,000 users, the subscription check should be fine for 5000 cases out of 20,000.
</issue>
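One way to read this request is that, once the limit is exceeded, the check should still pass for roughly the subscribed share of requests instead of failing every time. A hedged sketch of such a proportional check, purely illustrative and not necessarily the fix the project adopted:
```python
import random

def subscription_check_passes(allowed_users, active_token_users):
    """Illustrative only: once the limit is exceeded, let roughly
    `allowed_users` out of `active_token_users` checks pass instead of
    failing every single one."""
    if active_token_users <= allowed_users:
        return True
    # e.g. 5000 allowed out of 20,000 users -> passes in about 25% of cases
    return random.random() < (allowed_users / active_token_users)
```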
<code>
[start of privacyidea/lib/subscriptions.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2016-09-23 Cornelius Kölbel <[email protected]>
4 # Save and delete subscriptions
5 #
6 # License: AGPLv3
7 #
8 # This code is free software; you can redistribute it and/or
9 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
10 # License as published by the Free Software Foundation; either
11 # version 3 of the License, or any later version.
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public
19 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 #
22 __doc__ = """Save and list subscription information.
23 Provide decorator to test the subscriptions.
24
25 The code is tested in tests/test_lib_subscriptions.py.
26 """
27
28 import logging
29 import datetime
30 import random
31 from .log import log_with
32 from ..models import Subscription
33 from privacyidea.lib.error import SubscriptionError
34 from privacyidea.lib.token import get_tokens
35 from privacyidea.lib.crypto import Sign
36 import functools
37 from privacyidea.lib.framework import get_app_config_value
38 import os
39 import traceback
40 from sqlalchemy import func
41 from six import PY2, string_types
42
43
44 if not PY2:
45 long = int
46
47 SUBSCRIPTION_DATE_FORMAT = "%Y-%m-%d"
48 SIGN_FORMAT = u"""{application}
49 {for_name}
50 {for_address}
51 {for_email}
52 {for_phone}
53 {for_url}
54 {for_comment}
55 {by_name}
56 {by_email}
57 {by_address}
58 {by_phone}
59 {by_url}
60 {date_from}
61 {date_till}
62 {num_users}
63 {num_tokens}
64 {num_clients}
65 {level}
66 """
67
68
69 APPLICATIONS = {"demo_application": 0,
70 "owncloud": 50,
71 "privacyidea-ldap-proxy": 50,
72 "privacyidea-cp": 50,
73 "privacyidea-adfs": 50,
74 "privacyidea-keycloak": 10000,
75 "simplesamlphp": 10000,
76 "privacyidea-simplesamlphp": 10000,
77 "privacyidea authenticator": 10,
78 "privacyidea": 50}
79
80 log = logging.getLogger(__name__)
81
82
83 def get_users_with_active_tokens():
84 """
85 Returns the numbers of users (userId, Resolver) with active tokens.
86
87 :return: Number of users
88 :rtype: int
89 """
90 from privacyidea.models import Token, TokenOwner
91 sql_query = TokenOwner.query.with_entities(TokenOwner.resolver, TokenOwner.user_id)
92 sql_query = sql_query.filter(Token.active == True).filter(Token.id == TokenOwner.token_id).distinct()
93 return sql_query.count()
94
95
96 def subscription_status(component="privacyidea", tokentype=None):
97 """
98 Return the status of the subscription
99
100 0: Token count <= 50
101 1: Token count > 50, no subscription at all
102 2: subscription expired
103 3: subscription OK
104
105 :return: subscription state
106 """
107 token_count = get_tokens(assigned=True, active=True, count=True, tokentype=tokentype)
108 if token_count <= APPLICATIONS.get(component, 50):
109 return 0
110
111 subscriptions = get_subscription(component)
112 if len(subscriptions) == 0:
113 return 1
114
115 try:
116 check_subscription(component)
117 except SubscriptionError as exx:
118 log.warning(u"{0}".format(exx))
119 return 2
120
121 return 3
122
123
124 @log_with(log)
125 def save_subscription(subscription):
126 """
127 Saves a subscription to the database. If the subscription already exists,
128 it is updated.
129
130 :param subscription: dictionary with all attributes of the
131 subscription
132 :type subscription: dict
133 :return: True in case of success
134 """
135 if isinstance(subscription.get("date_from"), string_types):
136 subscription["date_from"] = datetime.datetime.strptime(
137 subscription.get("date_from"), SUBSCRIPTION_DATE_FORMAT)
138 if isinstance(subscription.get("date_till"), string_types):
139 subscription["date_till"] = datetime.datetime.strptime(
140 subscription.get("date_till"), SUBSCRIPTION_DATE_FORMAT)
141
142 # verify the signature of the subscriptions
143 check_signature(subscription)
144
145 s = Subscription(application=subscription.get("application"),
146 for_name=subscription.get("for_name"),
147 for_address=subscription.get("for_address"),
148 for_email=subscription.get("for_email"),
149 for_phone=subscription.get("for_phone"),
150 for_url=subscription.get("for_url"),
151 for_comment=subscription.get("for_comment"),
152 by_name=subscription.get("by_name"),
153 by_email=subscription.get("by_email"),
154 by_address=subscription.get("by_address"),
155 by_phone=subscription.get("by_phone"),
156 by_url=subscription.get("by_url"),
157 date_from=subscription.get("date_from"),
158 date_till=subscription.get("date_till"),
159 num_users=subscription.get("num_users"),
160 num_tokens=subscription.get("num_tokens"),
161 num_clients=subscription.get("num_clients"),
162 level=subscription.get("level"),
163 signature=subscription.get("signature")
164 ).save()
165 return s
166
167
168 def get_subscription(application=None):
169 """
170 Return a list of subscriptions for a certain application
171 If application is omitted, all applications are returned.
172
173 :param application: Name of the application
174 :return: list of subscription dictionaries
175 """
176 subscriptions = []
177 sql_query = Subscription.query
178 if application:
179 sql_query = sql_query.filter(func.lower(Subscription.application) ==
180 application.lower())
181
182 for sub in sql_query.all():
183 subscriptions.append(sub.get())
184
185 return subscriptions
186
187
188 @log_with(log)
189 def delete_subscription(application):
190 """
191 Delete the subscription for the given application
192
193 :param application:
194 :return: True in case of success
195 """
196 ret = -1
197 sub = Subscription.query.filter(Subscription.application ==
198 application).first()
199
200 if sub:
201 sub.delete()
202 ret = sub.id
203 return ret
204
205
206 def raise_exception_probability(subscription=None):
207 """
208 Depending on the subscription this will return True, so that an exception
209 can be raised
210
211 :param subscription: Subscription dictionary
212 :return: Bool
213 """
214 if not subscription:
215 # No subscription at all. We are in a kind of demo mode and return
216 # True with a 50% chance
217 return random.randrange(0, 2)
218
219 expire = subscription.get("date_till")
220 delta = datetime.datetime.now() - expire
221 if delta.days > 0:
222 # calculate a certain probability <1
223 # After 44 days we get 50%
224 # After 74 days we get 80%
225 # After 94 days we get 100%
226 p = 0.2 + ((delta.days-14.0)/30.0) * 0.3
227 return random.random() < p
228
229 return False
230
231
232 def check_subscription(application, max_free_subscriptions=None):
233 """
234 This checks if the subscription for the given application is valid.
235 In case of a failure an Exception is raised.
236
237 :param application: the name of the application to check
238 :param max_free_subscriptions: the maximum number of subscriptions
239 without a subscription file. If not given, the default is used.
240 :return: bool
241 """
242 if application.lower() in APPLICATIONS:
243 subscriptions = get_subscription(application) or get_subscription(
244 application.lower())
245 # get the number of users with active tokens
246 token_users = get_users_with_active_tokens()
247 free_subscriptions = max_free_subscriptions or APPLICATIONS.get(application.lower())
248 if len(subscriptions) == 0:
249 if token_users > free_subscriptions:
250 raise SubscriptionError(description="No subscription for your client.",
251 application=application)
252 else:
253 subscription = subscriptions[0]
254 expire_date = subscription.get("date_till")
255 if expire_date < datetime.datetime.now():
256 # subscription has expired
257 if raise_exception_probability(subscription):
258 raise SubscriptionError(description="Your subscription "
259 "expired.",
260 application=application)
261 else:
262 # subscription is still valid, so check the signature.
263 check_signature(subscription)
264 if token_users > subscription.get("num_tokens"):
265 # subscription is exceeded
266 raise SubscriptionError(description="Too many users "
267 "with assigned tokens. "
268 "Subscription exceeded.",
269 application=application)
270
271 return True
272
273
274 def check_signature(subscription):
275 """
276 This function checks the signature of a subscription. If the signature
277 checking fails, a SignatureError / Exception is raised.
278
279 :param subscription: The dict of the subscription
280 :return: True
281 """
282 vendor = subscription.get("by_name").split()[0]
283 enckey = get_app_config_value("PI_ENCFILE", "/etc/privacyidea/enckey")
284 dirname = os.path.dirname(enckey)
285 # In dirname we are searching for <vendor>.pem
286 filename = u"{0!s}/{1!s}.pem".format(dirname, vendor)
287
288 try:
289 # remove the minutes 00:00:00
290 subscription["date_from"] = subscription.get("date_from").strftime(SUBSCRIPTION_DATE_FORMAT)
291 subscription["date_till"] = subscription.get("date_till").strftime(SUBSCRIPTION_DATE_FORMAT)
292 sign_string = SIGN_FORMAT.format(**subscription)
293 with open(filename, 'rb') as key_file:
294 sign_obj = Sign(private_key=None, public_key=key_file.read())
295
296 signature = subscription.get('signature', '100')
297 r = sign_obj.verify(sign_string, signature, verify_old_sigs=True)
298 subscription["date_from"] = datetime.datetime.strptime(
299 subscription.get("date_from"),
300 SUBSCRIPTION_DATE_FORMAT)
301 subscription["date_till"] = datetime.datetime.strptime(
302 subscription.get("date_till"),
303 SUBSCRIPTION_DATE_FORMAT)
304 except Exception as _e:
305 log.debug(traceback.format_exc())
306 raise SubscriptionError("Verifying the signature of your subscription "
307 "failed.",
308 application=subscription.get("application"))
309
310 if not r:
311 raise SubscriptionError("Signature of your subscription does not "
312 "match.",
313 application=subscription.get("application"))
314
315 return r
316
317
318 class CheckSubscription(object):
319 """
320 Decorator to decorate an API request and check if the subscription is valid.
321 For this, we evaluate the requesting client.
322 If the subscription for this client is not valid, we raise an exception.
323 """
324
325 def __init__(self, request):
326 self.request = request
327
328 def __call__(self, func):
329 @functools.wraps(func)
330 def check_subscription_wrapper(*args, **kwds):
331 request = self.request
332 ua = request.user_agent
333 ua_str = "{0!s}".format(ua) or "unknown"
334 application = ua_str.split()[0]
335 # check and raise if fails
336 #check_subscription("privacyidea")
337 check_subscription(application)
338 f_result = func(*args, **kwds)
339 return f_result
340
341 return check_subscription_wrapper
342
[end of privacyidea/lib/subscriptions.py]
</code>
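The grace-period formula in `raise_exception_probability` above is easy to sanity-check; this tiny standalone loop (not part of the repository, purely illustrative) reproduces the percentages quoted in its docstring:

```python
# Spot-check of p = 0.2 + ((days_over - 14) / 30) * 0.3 from
# raise_exception_probability: 44, 74 and 94 days past expiry
# give roughly 50%, 80% and 100%.
for days_over in (44, 74, 94):
    p = 0.2 + ((days_over - 14.0) / 30.0) * 0.3
    print(days_over, round(p, 2))   # 44 0.5, 74 0.8, 94 1.0
```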
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/lib/subscriptions.py b/privacyidea/lib/subscriptions.py
--- a/privacyidea/lib/subscriptions.py
+++ b/privacyidea/lib/subscriptions.py
@@ -205,8 +205,8 @@
def raise_exception_probability(subscription=None):
"""
- Depending on the subscription this will return True, so that an exception
- can be raised
+ Depending on the subscription expiration data this will return True,
+ so that an exception can be raised
:param subscription: Subscription dictionary
:return: Bool
@@ -229,6 +229,26 @@
return False
+def subscription_exceeded_probability(active_tokens, allowed_tokens):
+ """
+ Depending on the subscription token numbers, this will return True,
+ so that an exception can be raised.
+
+ Returns true if a Subscription Exception is to be raised.
+
+ :param active_tokens: The number of the active tokens
+ :param allowed_tokens: The number of the allowed tokens
+ :return:
+ """
+ # old, hard behaviour
+ # return active_tokens > allowed_tokens
+ if active_tokens > allowed_tokens:
+ prob_check = random.randrange(active_tokens +1)
+ return prob_check > allowed_tokens
+ else:
+ return False
+
+
def check_subscription(application, max_free_subscriptions=None):
"""
This checks if the subscription for the given application is valid.
@@ -246,7 +266,7 @@
token_users = get_users_with_active_tokens()
free_subscriptions = max_free_subscriptions or APPLICATIONS.get(application.lower())
if len(subscriptions) == 0:
- if token_users > free_subscriptions:
+ if subscription_exceeded_probability(token_users, free_subscriptions):
raise SubscriptionError(description="No subscription for your client.",
application=application)
else:
@@ -261,7 +281,8 @@
else:
# subscription is still valid, so check the signature.
check_signature(subscription)
- if token_users > subscription.get("num_tokens"):
+ allowed_tokennums = subscription.get("num_tokens")
+ if subscription_exceeded_probability(token_users, allowed_tokennums):
# subscription is exceeded
raise SubscriptionError(description="Too many users "
"with assigned tokens. "
|
{"golden_diff": "diff --git a/privacyidea/lib/subscriptions.py b/privacyidea/lib/subscriptions.py\n--- a/privacyidea/lib/subscriptions.py\n+++ b/privacyidea/lib/subscriptions.py\n@@ -205,8 +205,8 @@\n \n def raise_exception_probability(subscription=None):\n \"\"\"\n- Depending on the subscription this will return True, so that an exception\n- can be raised\n+ Depending on the subscription expiration data this will return True,\n+ so that an exception can be raised\n \n :param subscription: Subscription dictionary\n :return: Bool\n@@ -229,6 +229,26 @@\n return False\n \n \n+def subscription_exceeded_probability(active_tokens, allowed_tokens):\n+ \"\"\"\n+ Depending on the subscription token numbers, this will return True,\n+ so that an exception can be raised.\n+\n+ Returns true if a Subscription Exception is to be raised.\n+\n+ :param active_tokens: The number of the active tokens\n+ :param allowed_tokens: The number of the allowed tokens\n+ :return:\n+ \"\"\"\n+ # old, hard behaviour\n+ # return active_tokens > allowed_tokens\n+ if active_tokens > allowed_tokens:\n+ prob_check = random.randrange(active_tokens +1)\n+ return prob_check > allowed_tokens\n+ else:\n+ return False\n+\n+\n def check_subscription(application, max_free_subscriptions=None):\n \"\"\"\n This checks if the subscription for the given application is valid.\n@@ -246,7 +266,7 @@\n token_users = get_users_with_active_tokens()\n free_subscriptions = max_free_subscriptions or APPLICATIONS.get(application.lower())\n if len(subscriptions) == 0:\n- if token_users > free_subscriptions:\n+ if subscription_exceeded_probability(token_users, free_subscriptions):\n raise SubscriptionError(description=\"No subscription for your client.\",\n application=application)\n else:\n@@ -261,7 +281,8 @@\n else:\n # subscription is still valid, so check the signature.\n check_signature(subscription)\n- if token_users > subscription.get(\"num_tokens\"):\n+ allowed_tokennums = subscription.get(\"num_tokens\")\n+ if subscription_exceeded_probability(token_users, allowed_tokennums):\n # subscription is exceeded\n raise SubscriptionError(description=\"Too many users \"\n \"with assigned tokens. \"\n", "issue": "Subscription checking to hard\nThe subscription check for certain plugins will fail as soon as it is exceeded by only one token.\r\n\r\nIf a plugin with 5000 subscriptions is hit by a system with 20.000 users, the subscription check should be fine for 5000 cases out of 20.000.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2016-09-23 Cornelius K\u00f6lbel <[email protected]>\n# Save and delete subscriptions\n#\n# License: AGPLv3\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"Save and list subscription information.\nProvide decorator to test the subscriptions.\n\nThe code is tested in tests/test_lib_subscriptions.py.\n\"\"\"\n\nimport logging\nimport datetime\nimport random\nfrom .log import log_with\nfrom ..models import Subscription\nfrom privacyidea.lib.error import SubscriptionError\nfrom privacyidea.lib.token import get_tokens\nfrom privacyidea.lib.crypto import Sign\nimport functools\nfrom privacyidea.lib.framework import get_app_config_value\nimport os\nimport traceback\nfrom sqlalchemy import func\nfrom six import PY2, string_types\n\n\nif not PY2:\n long = int\n\nSUBSCRIPTION_DATE_FORMAT = \"%Y-%m-%d\"\nSIGN_FORMAT = u\"\"\"{application}\n{for_name}\n{for_address}\n{for_email}\n{for_phone}\n{for_url}\n{for_comment}\n{by_name}\n{by_email}\n{by_address}\n{by_phone}\n{by_url}\n{date_from}\n{date_till}\n{num_users}\n{num_tokens}\n{num_clients}\n{level}\n\"\"\"\n\n\nAPPLICATIONS = {\"demo_application\": 0,\n \"owncloud\": 50,\n \"privacyidea-ldap-proxy\": 50,\n \"privacyidea-cp\": 50,\n \"privacyidea-adfs\": 50,\n \"privacyidea-keycloak\": 10000,\n \"simplesamlphp\": 10000,\n \"privacyidea-simplesamlphp\": 10000,\n \"privacyidea authenticator\": 10,\n \"privacyidea\": 50}\n\nlog = logging.getLogger(__name__)\n\n\ndef get_users_with_active_tokens():\n \"\"\"\n Returns the numbers of users (userId, Resolver) with active tokens.\n\n :return: Number of users\n :rtype: int\n \"\"\"\n from privacyidea.models import Token, TokenOwner\n sql_query = TokenOwner.query.with_entities(TokenOwner.resolver, TokenOwner.user_id)\n sql_query = sql_query.filter(Token.active == True).filter(Token.id == TokenOwner.token_id).distinct()\n return sql_query.count()\n\n\ndef subscription_status(component=\"privacyidea\", tokentype=None):\n \"\"\"\n Return the status of the subscription\n\n 0: Token count <= 50\n 1: Token count > 50, no subscription at all\n 2: subscription expired\n 3: subscription OK\n\n :return: subscription state\n \"\"\"\n token_count = get_tokens(assigned=True, active=True, count=True, tokentype=tokentype)\n if token_count <= APPLICATIONS.get(component, 50):\n return 0\n\n subscriptions = get_subscription(component)\n if len(subscriptions) == 0:\n return 1\n\n try:\n check_subscription(component)\n except SubscriptionError as exx:\n log.warning(u\"{0}\".format(exx))\n return 2\n\n return 3\n\n\n@log_with(log)\ndef save_subscription(subscription):\n \"\"\"\n Saves a subscription to the database. 
If the subscription already exists,\n it is updated.\n\n :param subscription: dictionary with all attributes of the\n subscription\n :type subscription: dict\n :return: True in case of success\n \"\"\"\n if isinstance(subscription.get(\"date_from\"), string_types):\n subscription[\"date_from\"] = datetime.datetime.strptime(\n subscription.get(\"date_from\"), SUBSCRIPTION_DATE_FORMAT)\n if isinstance(subscription.get(\"date_till\"), string_types):\n subscription[\"date_till\"] = datetime.datetime.strptime(\n subscription.get(\"date_till\"), SUBSCRIPTION_DATE_FORMAT)\n\n # verify the signature of the subscriptions\n check_signature(subscription)\n\n s = Subscription(application=subscription.get(\"application\"),\n for_name=subscription.get(\"for_name\"),\n for_address=subscription.get(\"for_address\"),\n for_email=subscription.get(\"for_email\"),\n for_phone=subscription.get(\"for_phone\"),\n for_url=subscription.get(\"for_url\"),\n for_comment=subscription.get(\"for_comment\"),\n by_name=subscription.get(\"by_name\"),\n by_email=subscription.get(\"by_email\"),\n by_address=subscription.get(\"by_address\"),\n by_phone=subscription.get(\"by_phone\"),\n by_url=subscription.get(\"by_url\"),\n date_from=subscription.get(\"date_from\"),\n date_till=subscription.get(\"date_till\"),\n num_users=subscription.get(\"num_users\"),\n num_tokens=subscription.get(\"num_tokens\"),\n num_clients=subscription.get(\"num_clients\"),\n level=subscription.get(\"level\"),\n signature=subscription.get(\"signature\")\n ).save()\n return s\n\n\ndef get_subscription(application=None):\n \"\"\"\n Return a list of subscriptions for a certain application\n If application is omitted, all applications are returned.\n\n :param application: Name of the application\n :return: list of subscription dictionaries\n \"\"\"\n subscriptions = []\n sql_query = Subscription.query\n if application:\n sql_query = sql_query.filter(func.lower(Subscription.application) ==\n application.lower())\n\n for sub in sql_query.all():\n subscriptions.append(sub.get())\n\n return subscriptions\n\n\n@log_with(log)\ndef delete_subscription(application):\n \"\"\"\n Delete the subscription for the given application\n\n :param application:\n :return: True in case of success\n \"\"\"\n ret = -1\n sub = Subscription.query.filter(Subscription.application ==\n application).first()\n\n if sub:\n sub.delete()\n ret = sub.id\n return ret\n\n\ndef raise_exception_probability(subscription=None):\n \"\"\"\n Depending on the subscription this will return True, so that an exception\n can be raised\n\n :param subscription: Subscription dictionary\n :return: Bool\n \"\"\"\n if not subscription:\n # No subscription at all. We are in a kind of demo mode and return\n # True with a 50% chance\n return random.randrange(0, 2)\n\n expire = subscription.get(\"date_till\")\n delta = datetime.datetime.now() - expire\n if delta.days > 0:\n # calculate a certain probability <1\n # After 44 days we get 50%\n # After 74 days we get 80%\n # After 94 days we get 100%\n p = 0.2 + ((delta.days-14.0)/30.0) * 0.3\n return random.random() < p\n\n return False\n\n\ndef check_subscription(application, max_free_subscriptions=None):\n \"\"\"\n This checks if the subscription for the given application is valid.\n In case of a failure an Exception is raised.\n\n :param application: the name of the application to check\n :param max_free_subscriptions: the maximum number of subscriptions\n without a subscription file. 
If not given, the default is used.\n :return: bool\n \"\"\"\n if application.lower() in APPLICATIONS:\n subscriptions = get_subscription(application) or get_subscription(\n application.lower())\n # get the number of users with active tokens\n token_users = get_users_with_active_tokens()\n free_subscriptions = max_free_subscriptions or APPLICATIONS.get(application.lower())\n if len(subscriptions) == 0:\n if token_users > free_subscriptions:\n raise SubscriptionError(description=\"No subscription for your client.\",\n application=application)\n else:\n subscription = subscriptions[0]\n expire_date = subscription.get(\"date_till\")\n if expire_date < datetime.datetime.now():\n # subscription has expired\n if raise_exception_probability(subscription):\n raise SubscriptionError(description=\"Your subscription \"\n \"expired.\",\n application=application)\n else:\n # subscription is still valid, so check the signature.\n check_signature(subscription)\n if token_users > subscription.get(\"num_tokens\"):\n # subscription is exceeded\n raise SubscriptionError(description=\"Too many users \"\n \"with assigned tokens. \"\n \"Subscription exceeded.\",\n application=application)\n\n return True\n\n\ndef check_signature(subscription):\n \"\"\"\n This function checks the signature of a subscription. If the signature\n checking fails, a SignatureError / Exception is raised.\n\n :param subscription: The dict of the subscription\n :return: True\n \"\"\"\n vendor = subscription.get(\"by_name\").split()[0]\n enckey = get_app_config_value(\"PI_ENCFILE\", \"/etc/privacyidea/enckey\")\n dirname = os.path.dirname(enckey)\n # In dirname we are searching for <vendor>.pem\n filename = u\"{0!s}/{1!s}.pem\".format(dirname, vendor)\n\n try:\n # remove the minutes 00:00:00\n subscription[\"date_from\"] = subscription.get(\"date_from\").strftime(SUBSCRIPTION_DATE_FORMAT)\n subscription[\"date_till\"] = subscription.get(\"date_till\").strftime(SUBSCRIPTION_DATE_FORMAT)\n sign_string = SIGN_FORMAT.format(**subscription)\n with open(filename, 'rb') as key_file:\n sign_obj = Sign(private_key=None, public_key=key_file.read())\n\n signature = subscription.get('signature', '100')\n r = sign_obj.verify(sign_string, signature, verify_old_sigs=True)\n subscription[\"date_from\"] = datetime.datetime.strptime(\n subscription.get(\"date_from\"),\n SUBSCRIPTION_DATE_FORMAT)\n subscription[\"date_till\"] = datetime.datetime.strptime(\n subscription.get(\"date_till\"),\n SUBSCRIPTION_DATE_FORMAT)\n except Exception as _e:\n log.debug(traceback.format_exc())\n raise SubscriptionError(\"Verifying the signature of your subscription \"\n \"failed.\",\n application=subscription.get(\"application\"))\n\n if not r:\n raise SubscriptionError(\"Signature of your subscription does not \"\n \"match.\",\n application=subscription.get(\"application\"))\n\n return r\n\n\nclass CheckSubscription(object):\n \"\"\"\n Decorator to decorate an API request and check if the subscription is valid.\n For this, we evaluate the requesting client.\n If the subscription for this client is not valid, we raise an exception.\n \"\"\"\n\n def __init__(self, request):\n self.request = request\n\n def __call__(self, func):\n @functools.wraps(func)\n def check_subscription_wrapper(*args, **kwds):\n request = self.request\n ua = request.user_agent\n ua_str = \"{0!s}\".format(ua) or \"unknown\"\n application = ua_str.split()[0]\n # check and raise if fails\n #check_subscription(\"privacyidea\")\n check_subscription(application)\n f_result = func(*args, **kwds)\n return 
f_result\n\n return check_subscription_wrapper\n", "path": "privacyidea/lib/subscriptions.py"}]}
| 4,010 | 509 |
gh_patches_debug_20650
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-3311
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Save Data crashes on no data
##### Orange version
3.16, caused by #3147
##### Actual behavior
Error that `is_sparse` does not exist for a `None` object.
##### Steps to reproduce the behavior
Connect a file to the File widget and then disconnect it. You can also connect an empty selection.
</issue>
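The crash comes down to dereferencing an input that has just been set to `None`; a minimal, framework-free sketch of the guard pattern (the names below are illustrative stand-ins, not Orange's actual widget API) looks like this:

```python
# Illustrative stand-ins; the real FILE_TYPES entries and their sparse
# support flags live in Orange/widgets/data/owsave.py.
FILE_TYPES = [
    ("Format A", ".a", True),    # supports sparse data
    ("Format B", ".b", False),   # dense only
]

def items_for(data):
    # Returning early when the input has been disconnected avoids calling
    # methods such as is_sparse() on None, which is what triggers the crash.
    if data is None:
        return []
    return [name for name, _ext, supports_sparse in FILE_TYPES
            if supports_sparse or not data.is_sparse()]
```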
<code>
[start of Orange/widgets/data/owsave.py]
1 import os.path
2 import pathlib
3
4 from AnyQt.QtWidgets import QFormLayout
5 from AnyQt.QtCore import Qt
6
7 from Orange.data.table import Table
8 from Orange.data.io import Compression, FileFormat, TabReader, CSVReader, PickleReader
9 from Orange.widgets import gui, widget
10 from Orange.widgets.settings import Setting
11 from Orange.widgets.utils import filedialogs
12 from Orange.widgets.widget import Input
13
14 FILE_TYPES = [
15 ("{} ({})".format(w.DESCRIPTION, w.EXTENSIONS[0]),
16 w.EXTENSIONS[0],
17 w.SUPPORT_SPARSE_DATA)
18 for w in (TabReader, CSVReader, PickleReader)
19 ]
20
21 COMPRESSIONS = [
22 ("gzip ({})".format(Compression.GZIP), Compression.GZIP),
23 ("bzip2 ({})".format(Compression.BZIP2), Compression.BZIP2),
24 ("lzma ({})".format(Compression.XZ), Compression.XZ),
25 ]
26
27
28 class OWSave(widget.OWWidget):
29 name = "Save Data"
30 description = "Save data to an output file."
31 icon = "icons/Save.svg"
32 category = "Data"
33 keywords = []
34
35 class Inputs:
36 data = Input("Data", Table)
37
38 class Error(widget.OWWidget.Error):
39 unsupported_extension = widget.Msg("Selected extension is not supported.")
40
41 want_main_area = False
42 resizing_enabled = False
43
44 last_dir = Setting("")
45 auto_save = Setting(False)
46 filetype = Setting(FILE_TYPES[0][0])
47 compression = Setting(COMPRESSIONS[0][0])
48 compress = Setting(False)
49
50 def __init__(self):
51 super().__init__()
52 self.data = None
53 self.filename = ""
54 self.basename = ""
55 self.type_ext = ""
56 self.compress_ext = ""
57 self.writer = None
58
59 form = QFormLayout(
60 labelAlignment=Qt.AlignLeft,
61 formAlignment=Qt.AlignLeft,
62 rowWrapPolicy=QFormLayout.WrapLongRows,
63 verticalSpacing=10,
64 )
65
66 box = gui.vBox(self.controlArea, "Format")
67
68 gui.comboBox(
69 box, self, "filetype",
70 callback=self._update_text,
71 items=[item for item, _, _ in FILE_TYPES],
72 sendSelectedValue=True,
73 )
74 form.addRow("File type", self.controls.filetype, )
75
76 gui.comboBox(
77 box, self, "compression",
78 callback=self._update_text,
79 items=[item for item, _ in COMPRESSIONS],
80 sendSelectedValue=True,
81 )
82 gui.checkBox(
83 box, self, "compress", label="Use compression",
84 callback=self._update_text,
85 )
86
87 form.addRow(self.controls.compress, self.controls.compression)
88
89 box.layout().addLayout(form)
90
91 self.save = gui.auto_commit(
92 self.controlArea, self, "auto_save", "Save", box=False,
93 commit=self.save_file, callback=self.adjust_label,
94 disabled=True, addSpace=True
95 )
96 self.save_as = gui.button(
97 self.controlArea, self, "Save As...",
98 callback=self.save_file_as, disabled=True
99 )
100 self.save_as.setMinimumWidth(220)
101 self.adjustSize()
102
103 def get_writer_selected(self):
104 writer = FileFormat.get_reader(self.type_ext)
105
106 ext = self.type_ext + self.compress_ext
107 if ext not in writer.EXTENSIONS:
108 self.Error.unsupported_extension()
109 return None
110 writer.EXTENSIONS = [ext]
111 return writer
112
113 @classmethod
114 def remove_extensions(cls, filename):
115 if not filename:
116 return None
117 for ext in pathlib.PurePosixPath(filename).suffixes:
118 filename = filename.replace(ext, '')
119 return filename
120
121 def adjust_label(self):
122 if self.filename:
123 text = "Auto save as '{}'" if self.auto_save else "Save as '{}'"
124 self.save.button.setText(
125 text.format(self.basename + self.type_ext + self.compress_ext))
126
127 @Inputs.data
128 def dataset(self, data):
129 self.data = data
130 self.save.setDisabled(data is None)
131 self.save_as.setDisabled(data is None)
132 if data is not None:
133 self.save_file()
134
135 self.controls.filetype.clear()
136 if self.data.is_sparse():
137 self.controls.filetype.insertItems(0, [item for item, _, supports_sparse in FILE_TYPES
138 if supports_sparse])
139 else:
140 self.controls.filetype.insertItems(0, [item for item, _, _ in FILE_TYPES])
141
142 def save_file_as(self):
143 file_name = self.remove_extensions(self.filename) or os.path.join(
144 self.last_dir or os.path.expanduser("~"),
145 getattr(self.data, 'name', ''))
146 self.update_extension()
147 writer = self.get_writer_selected()
148 if not writer:
149 return
150
151 filename, writer, _ = filedialogs.open_filename_dialog_save(
152 file_name, '', [writer],
153 )
154 if not filename:
155 return
156
157 self.filename = filename
158 self.writer = writer
159 self.last_dir = os.path.split(self.filename)[0]
160 self.basename = os.path.basename(self.remove_extensions(filename))
161 self.unconditional_save_file()
162 self.adjust_label()
163
164 def save_file(self):
165 if self.data is None:
166 return
167 if not self.filename:
168 self.save_file_as()
169 else:
170 try:
171 self.writer.write(os.path.join(self.last_dir,
172 self.basename + self.type_ext + self.compress_ext),
173 self.data)
174 except Exception as err_value:
175 self.error(str(err_value))
176 else:
177 self.error()
178
179 def update_extension(self):
180 self.type_ext = [ext for name, ext, _ in FILE_TYPES if name == self.filetype][0]
181 self.compress_ext = dict(COMPRESSIONS)[self.compression] if self.compress else ''
182
183 def _update_text(self):
184 self.update_extension()
185 self.adjust_label()
186
187
188 if __name__ == "__main__":
189 import sys
190 from AnyQt.QtWidgets import QApplication
191
192 a = QApplication(sys.argv)
193 table = Table("iris")
194
195 ow = OWSave()
196 ow.show()
197 ow.dataset(table)
198 a.exec()
199 ow.saveSettings()
200
[end of Orange/widgets/data/owsave.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Orange/widgets/data/owsave.py b/Orange/widgets/data/owsave.py
--- a/Orange/widgets/data/owsave.py
+++ b/Orange/widgets/data/owsave.py
@@ -129,15 +129,18 @@
self.data = data
self.save.setDisabled(data is None)
self.save_as.setDisabled(data is None)
- if data is not None:
- self.save_file()
+ if data is None:
+ return
- self.controls.filetype.clear()
- if self.data.is_sparse():
- self.controls.filetype.insertItems(0, [item for item, _, supports_sparse in FILE_TYPES
- if supports_sparse])
- else:
- self.controls.filetype.insertItems(0, [item for item, _, _ in FILE_TYPES])
+ items = [item for item, _, supports_sparse in FILE_TYPES
+ if supports_sparse or not data.is_sparse()]
+ if items != [self.controls.filetype.itemText(i) for i in
+ range(self.controls.filetype.count())]:
+ self.controls.filetype.clear()
+ self.controls.filetype.insertItems(0, items)
+ self.update_extension()
+
+ self.save_file()
def save_file_as(self):
file_name = self.remove_extensions(self.filename) or os.path.join(
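Beyond the early return, the patch only clears and repopulates the file-type combo box when the computed item list actually differs from what is currently shown, which avoids needlessly resetting the widget (and, in practice, the user's current format choice). A stripped-down model of that guard, with a plain list standing in for the Qt combo box:

```python
# Plain-list model of the "only reset when the items changed" guard;
# current_items stands in for the QComboBox contents.
def refresh(current_items, new_items):
    if new_items != current_items:
        current_items[:] = new_items   # clear() followed by insertItems(0, ...)
        return True                    # the widget (and selection) was reset
    return False                       # nothing changed, selection survives
```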
|
{"golden_diff": "diff --git a/Orange/widgets/data/owsave.py b/Orange/widgets/data/owsave.py\n--- a/Orange/widgets/data/owsave.py\n+++ b/Orange/widgets/data/owsave.py\n@@ -129,15 +129,18 @@\n self.data = data\n self.save.setDisabled(data is None)\n self.save_as.setDisabled(data is None)\n- if data is not None:\n- self.save_file()\n+ if data is None:\n+ return\n \n- self.controls.filetype.clear()\n- if self.data.is_sparse():\n- self.controls.filetype.insertItems(0, [item for item, _, supports_sparse in FILE_TYPES\n- if supports_sparse])\n- else:\n- self.controls.filetype.insertItems(0, [item for item, _, _ in FILE_TYPES])\n+ items = [item for item, _, supports_sparse in FILE_TYPES\n+ if supports_sparse or not data.is_sparse()]\n+ if items != [self.controls.filetype.itemText(i) for i in\n+ range(self.controls.filetype.count())]:\n+ self.controls.filetype.clear()\n+ self.controls.filetype.insertItems(0, items)\n+ self.update_extension()\n+\n+ self.save_file()\n \n def save_file_as(self):\n file_name = self.remove_extensions(self.filename) or os.path.join(\n", "issue": "Save Data crashes on no data\n##### Orange version\r\n3.16, caused by #3147 \r\n\r\n##### Actual behavior\r\nError that is_spare does not exist for None object.\r\n\r\n##### Steps to reproduce the behavior\r\nConnect and disconnect a file to the File widget. You can also connect an empty selection.\r\n\n", "before_files": [{"content": "import os.path\nimport pathlib\n\nfrom AnyQt.QtWidgets import QFormLayout\nfrom AnyQt.QtCore import Qt\n\nfrom Orange.data.table import Table\nfrom Orange.data.io import Compression, FileFormat, TabReader, CSVReader, PickleReader\nfrom Orange.widgets import gui, widget\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils import filedialogs\nfrom Orange.widgets.widget import Input\n\nFILE_TYPES = [\n (\"{} ({})\".format(w.DESCRIPTION, w.EXTENSIONS[0]),\n w.EXTENSIONS[0],\n w.SUPPORT_SPARSE_DATA)\n for w in (TabReader, CSVReader, PickleReader)\n]\n\nCOMPRESSIONS = [\n (\"gzip ({})\".format(Compression.GZIP), Compression.GZIP),\n (\"bzip2 ({})\".format(Compression.BZIP2), Compression.BZIP2),\n (\"lzma ({})\".format(Compression.XZ), Compression.XZ),\n]\n\n\nclass OWSave(widget.OWWidget):\n name = \"Save Data\"\n description = \"Save data to an output file.\"\n icon = \"icons/Save.svg\"\n category = \"Data\"\n keywords = []\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Error(widget.OWWidget.Error):\n unsupported_extension = widget.Msg(\"Selected extension is not supported.\")\n\n want_main_area = False\n resizing_enabled = False\n\n last_dir = Setting(\"\")\n auto_save = Setting(False)\n filetype = Setting(FILE_TYPES[0][0])\n compression = Setting(COMPRESSIONS[0][0])\n compress = Setting(False)\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.filename = \"\"\n self.basename = \"\"\n self.type_ext = \"\"\n self.compress_ext = \"\"\n self.writer = None\n\n form = QFormLayout(\n labelAlignment=Qt.AlignLeft,\n formAlignment=Qt.AlignLeft,\n rowWrapPolicy=QFormLayout.WrapLongRows,\n verticalSpacing=10,\n )\n\n box = gui.vBox(self.controlArea, \"Format\")\n\n gui.comboBox(\n box, self, \"filetype\",\n callback=self._update_text,\n items=[item for item, _, _ in FILE_TYPES],\n sendSelectedValue=True,\n )\n form.addRow(\"File type\", self.controls.filetype, )\n\n gui.comboBox(\n box, self, \"compression\",\n callback=self._update_text,\n items=[item for item, _ in COMPRESSIONS],\n sendSelectedValue=True,\n )\n gui.checkBox(\n box, self, \"compress\", 
label=\"Use compression\",\n callback=self._update_text,\n )\n\n form.addRow(self.controls.compress, self.controls.compression)\n\n box.layout().addLayout(form)\n\n self.save = gui.auto_commit(\n self.controlArea, self, \"auto_save\", \"Save\", box=False,\n commit=self.save_file, callback=self.adjust_label,\n disabled=True, addSpace=True\n )\n self.save_as = gui.button(\n self.controlArea, self, \"Save As...\",\n callback=self.save_file_as, disabled=True\n )\n self.save_as.setMinimumWidth(220)\n self.adjustSize()\n\n def get_writer_selected(self):\n writer = FileFormat.get_reader(self.type_ext)\n\n ext = self.type_ext + self.compress_ext\n if ext not in writer.EXTENSIONS:\n self.Error.unsupported_extension()\n return None\n writer.EXTENSIONS = [ext]\n return writer\n\n @classmethod\n def remove_extensions(cls, filename):\n if not filename:\n return None\n for ext in pathlib.PurePosixPath(filename).suffixes:\n filename = filename.replace(ext, '')\n return filename\n\n def adjust_label(self):\n if self.filename:\n text = \"Auto save as '{}'\" if self.auto_save else \"Save as '{}'\"\n self.save.button.setText(\n text.format(self.basename + self.type_ext + self.compress_ext))\n\n @Inputs.data\n def dataset(self, data):\n self.data = data\n self.save.setDisabled(data is None)\n self.save_as.setDisabled(data is None)\n if data is not None:\n self.save_file()\n\n self.controls.filetype.clear()\n if self.data.is_sparse():\n self.controls.filetype.insertItems(0, [item for item, _, supports_sparse in FILE_TYPES\n if supports_sparse])\n else:\n self.controls.filetype.insertItems(0, [item for item, _, _ in FILE_TYPES])\n\n def save_file_as(self):\n file_name = self.remove_extensions(self.filename) or os.path.join(\n self.last_dir or os.path.expanduser(\"~\"),\n getattr(self.data, 'name', ''))\n self.update_extension()\n writer = self.get_writer_selected()\n if not writer:\n return\n\n filename, writer, _ = filedialogs.open_filename_dialog_save(\n file_name, '', [writer],\n )\n if not filename:\n return\n\n self.filename = filename\n self.writer = writer\n self.last_dir = os.path.split(self.filename)[0]\n self.basename = os.path.basename(self.remove_extensions(filename))\n self.unconditional_save_file()\n self.adjust_label()\n\n def save_file(self):\n if self.data is None:\n return\n if not self.filename:\n self.save_file_as()\n else:\n try:\n self.writer.write(os.path.join(self.last_dir,\n self.basename + self.type_ext + self.compress_ext),\n self.data)\n except Exception as err_value:\n self.error(str(err_value))\n else:\n self.error()\n\n def update_extension(self):\n self.type_ext = [ext for name, ext, _ in FILE_TYPES if name == self.filetype][0]\n self.compress_ext = dict(COMPRESSIONS)[self.compression] if self.compress else ''\n\n def _update_text(self):\n self.update_extension()\n self.adjust_label()\n\n\nif __name__ == \"__main__\":\n import sys\n from AnyQt.QtWidgets import QApplication\n\n a = QApplication(sys.argv)\n table = Table(\"iris\")\n\n ow = OWSave()\n ow.show()\n ow.dataset(table)\n a.exec()\n ow.saveSettings()\n", "path": "Orange/widgets/data/owsave.py"}]}
| 2,428 | 293 |
gh_patches_debug_19356
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2595
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
@work does not catch exceptions raised by calling repr on function's args
Consider this app, which calls the decorated `_job`, passing in an object whose `__repr__` raises:
```python
from textual import work
from textual.app import App, ComposeResult
from textual.worker import WorkerFailed
from textual.widgets import Static
class NoRepr():
def __repr__(self) -> str:
raise NotImplementedError("Don't call repr!")
class Repro(App):
def compose(self) -> ComposeResult:
yield Static("hi")
async def on_mount(self) -> None:
foo = NoRepr()
try:
worker = self._job(foo)
await worker.wait()
except WorkerFailed as e:
static = self.query_one(Static)
static.renderable = "caught"
@work(exclusive=False, exit_on_error=False)
def _job(self, foo: NoRepr) -> None:
return
if __name__ == "__main__":
app = Repro()
app.run()
```
Despite the try/except block, this app crashes with this trace:
```
╭─────────────────────────────────────────────────────────── Traceback (most recent call last) ────────────────────────────────────────────────────────────╮
│ /home/tco/open/textual-bug-repro/repro.py:19 in on_mount │
│ │
│ 16 │ async def on_mount(self) -> None: ╭────────────────────── locals ───────────────────────╮ │
│ 17 │ │ foo = NoRepr() │ foo = <repr-error "Don't call repr!"> │ │
│ 18 │ │ try: │ self = Repro(title='Repro', classes={'-dark-mode'}) │ │
│ ❱ 19 │ │ │ worker = self._job(foo) ╰─────────────────────────────────────────────────────╯ │
│ 20 │ │ │ await worker.wait() │
│ 21 │ │ except WorkerFailed as e: │
│ 22 │ │ │ static = self.query_one(Static) │
│ │
│ /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/lib/python3.8/site-packages/textual/_work_decorator.py:90 in decorated │
│ │
│ 87 │ │ │ self = args[0] │
│ 88 │ │ │ assert isinstance(self, DOMNode) │
│ 89 │ │ │ │
│ ❱ 90 │ │ │ positional_arguments = ", ".join(repr(arg) for arg in args[1:]) │
│ 91 │ │ │ keyword_arguments = ", ".join( │
│ 92 │ │ │ │ f"{name}={value!r}" for name, value in kwargs.items() │
│ 93 │ │ │ ) │
│ │
│ ╭──────────────────────────────────────────── locals ─────────────────────────────────────────────╮ │
│ │ args = (Repro(title='Repro', classes={'-dark-mode'}), <repr-error "Don't call repr!">) │ │
│ │ DOMNode = <class 'textual.dom.DOMNode'> │ │
│ │ exclusive = False │ │
│ │ exit_on_error = False │ │
│ │ group = 'default' │ │
│ │ kwargs = {} │ │
│ │ method = <function Repro._job at 0x7f76d4e9ac10> │ │
│ │ name = '' │ │
│ │ self = Repro(title='Repro', classes={'-dark-mode'}) │ │
│ ╰─────────────────────────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/lib/python3.8/site-packages/textual/_work_decorator.py:90 in <genexpr> │
│ │
│ 87 │ │ │ self = args[0] ╭──────────────────── locals ─────────────────────╮ │
│ 88 │ │ │ assert isinstance(self, DOMNode) │ .0 = <tuple_iterator object at 0x7f76cdb42610> │ │
│ 89 │ │ │ │ arg = <repr-error "Don't call repr!"> │ │
│ ❱ 90 │ │ │ positional_arguments = ", ".join(repr(arg) for arg in args[1:]) ╰─────────────────────────────────────────────────╯ │
│ 91 │ │ │ keyword_arguments = ", ".join( │
│ 92 │ │ │ │ f"{name}={value!r}" for name, value in kwargs.items() │
│ 93 │ │ │ ) │
│ │
│ /home/tco/open/textual-bug-repro/repro.py:9 in __repr__ │
│ │
│ 6 ╭──────────────── locals ────────────────╮ │
│ 7 class NoRepr(): │ self = <repr-error "Don't call repr!"> │ │
│ 8 │ def __repr__(self) -> str: ╰────────────────────────────────────────╯ │
│ ❱ 9 │ │ raise NotImplementedError("Don't call repr!") │
│ 10 │
│ 11 class Repro(App): │
│ 12 │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
NotImplementedError: Don't call repr!
```
This might seem trivial, but this bit me [here](https://github.com/tconbeer/harlequin/issues/56), since DuckDB's DuckDBPyRelations raise errors on `repr` for some queries:
```python
>>> import duckdb
>>> conn = duckdb.connect(":memory:")
>>> rel = conn.sql("select 0::struct(a int)")
>>> repr(rel)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
duckdb.ConversionException: Conversion Error: Unimplemented type for cast (INTEGER -> STRUCT(a INTEGER))
```
Consequently, this app, which attempts to fetch data from a DuckDBPyRelation in a @work-decorated function, will crash on some queries unless an additional `except` block is added to catch the DuckDB errors:
```python
import duckdb
from textual import work
from textual.app import App, ComposeResult
from textual.worker import WorkerFailed, get_current_worker
from textual.widgets import Static
class Repro(App):
def compose(self) -> ComposeResult:
yield Static("hi")
async def on_mount(self) -> None:
conn = duckdb.connect(":memory:")
rel = conn.sql("select 0::struct(a int)")
try:
worker = self.fetch_relation_data(rel)
await worker.wait()
except WorkerFailed as e:
static = self.query_one(Static)
static.renderable = f"WorkerFailed: {e.error}"
# uncomment to stop crashes
# except duckdb.Error as e:
# static = self.query_one(Static)
# static.renderable = f"DuckDB Error: {e}"
def set_data(self, data) -> None:
self.data = data
@work(exclusive=True, exit_on_error=False) # type: ignore
def fetch_relation_data(self, relation: duckdb.DuckDBPyRelation) -> None:
data = relation.fetchall()
worker = get_current_worker()
if not worker.is_cancelled:
self.call_from_thread(self.set_data, data)
if __name__ == "__main__":
app = Repro()
app.run()
```
# Textual Diagnostics
## Versions
| Name | Value |
|---------|--------|
| Textual | 0.24.1 |
| Rich | 13.3.5 |
## Python
| Name | Value |
|----------------|-----------------------------------------------------------------------------------|
| Version | 3.8.10 |
| Implementation | CPython |
| Compiler | GCC 9.3.0 |
| Executable | /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/bin/python |
## Operating System
| Name | Value |
|---------|-------------------------------------|
| System | Linux |
| Release | 5.10.60.1-microsoft-standard-WSL2 |
| Version | #1 SMP Wed Aug 25 23:20:18 UTC 2021 |
## Terminal
| Name | Value |
|----------------------|------------------|
| Terminal Application | Windows Terminal |
| TERM | xterm-256color |
| COLORTERM | *Not set* |
| FORCE_COLOR | *Not set* |
| NO_COLOR | *Not set* |
## Rich Console options
| Name | Value |
|----------------|----------------------|
| size | width=156, height=40 |
| legacy_windows | False |
| min_width | 1 |
| max_width | 156 |
| is_terminal | True |
| encoding | utf-8 |
| max_height | 40 |
| justify | None |
| overflow | None |
| no_wrap | False |
| highlight | None |
| markup | None |
| height | None |
</issue>
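The traceback above shows the exception escaping while the worker's description string is being built, before the worker ever runs, which is why neither `exit_on_error=False` nor the `WorkerFailed` handler can catch it. A standalone sketch of the defensive pattern (the function name and fallback text here are illustrative, not Textual's exact code):

```python
def safe_description(func, args, kwargs):
    # Building a human-readable call signature must not trust __repr__:
    # as the DuckDB example shows, repr() can raise arbitrary exceptions.
    try:
        positional = ", ".join(repr(arg) for arg in args)
        keywords = ", ".join(f"{name}={value!r}" for name, value in kwargs.items())
        tokens = [token for token in (positional, keywords) if token]
        return f"{func.__name__}({', '.join(tokens)})"
    except Exception:
        return f"{getattr(func, '__name__', 'worker')}(...)"
```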
<code>
[start of src/textual/_work_decorator.py]
1 """
2
3 A decorator used to create [workers](/guide/workers).
4 """
5
6
7 from __future__ import annotations
8
9 from functools import partial, wraps
10 from typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload
11
12 from typing_extensions import ParamSpec, TypeAlias
13
14 if TYPE_CHECKING:
15 from .worker import Worker
16
17
18 FactoryParamSpec = ParamSpec("FactoryParamSpec")
19 DecoratorParamSpec = ParamSpec("DecoratorParamSpec")
20 ReturnType = TypeVar("ReturnType")
21
22 Decorator: TypeAlias = Callable[
23 [
24 Union[
25 Callable[DecoratorParamSpec, ReturnType],
26 Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],
27 ]
28 ],
29 Callable[DecoratorParamSpec, "Worker[ReturnType]"],
30 ]
31
32
33 @overload
34 def work(
35 method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]
36 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]:
37 ...
38
39
40 @overload
41 def work(
42 method: Callable[FactoryParamSpec, ReturnType]
43 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]:
44 ...
45
46
47 @overload
48 def work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:
49 ...
50
51
52 def work(
53 method: Callable[FactoryParamSpec, ReturnType]
54 | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]
55 | None = None,
56 *,
57 name: str = "",
58 group: str = "default",
59 exit_on_error: bool = True,
60 exclusive: bool = False,
61 ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:
62 """A decorator used to create [workers](/guide/workers).
63
64 Args:
65 method: A function or coroutine.
66 name: A short string to identify the worker (in logs and debugging).
67 group: A short string to identify a group of workers.
68 exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.
69 exclusive: Cancel all workers in the same group.
70 """
71
72 def decorator(
73 method: (
74 Callable[DecoratorParamSpec, ReturnType]
75 | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]
76 )
77 ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:
78 """The decorator."""
79
80 @wraps(method)
81 def decorated(
82 *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs
83 ) -> Worker[ReturnType]:
84 """The replaced callable."""
85 from .dom import DOMNode
86
87 self = args[0]
88 assert isinstance(self, DOMNode)
89
90 positional_arguments = ", ".join(repr(arg) for arg in args[1:])
91 keyword_arguments = ", ".join(
92 f"{name}={value!r}" for name, value in kwargs.items()
93 )
94 tokens = [positional_arguments, keyword_arguments]
95 worker_description = (
96 f"{method.__name__}({', '.join(token for token in tokens if token)})"
97 )
98 worker = cast(
99 "Worker[ReturnType]",
100 self.run_worker(
101 partial(method, *args, **kwargs),
102 name=name or method.__name__,
103 group=group,
104 description=worker_description,
105 exclusive=exclusive,
106 exit_on_error=exit_on_error,
107 ),
108 )
109 return worker
110
111 return decorated
112
113 if method is None:
114 return decorator
115 else:
116 return decorator(method)
117
[end of src/textual/_work_decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py
--- a/src/textual/_work_decorator.py
+++ b/src/textual/_work_decorator.py
@@ -87,14 +87,15 @@
self = args[0]
assert isinstance(self, DOMNode)
- positional_arguments = ", ".join(repr(arg) for arg in args[1:])
- keyword_arguments = ", ".join(
- f"{name}={value!r}" for name, value in kwargs.items()
- )
- tokens = [positional_arguments, keyword_arguments]
- worker_description = (
- f"{method.__name__}({', '.join(token for token in tokens if token)})"
- )
+ try:
+ positional_arguments = ", ".join(repr(arg) for arg in args[1:])
+ keyword_arguments = ", ".join(
+ f"{name}={value!r}" for name, value in kwargs.items()
+ )
+ tokens = [positional_arguments, keyword_arguments]
+ worker_description = f"{method.__name__}({', '.join(token for token in tokens if token)})"
+ except Exception:
+ worker_description = "<worker>"
worker = cast(
"Worker[ReturnType]",
self.run_worker(
|
{"golden_diff": "diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py\n--- a/src/textual/_work_decorator.py\n+++ b/src/textual/_work_decorator.py\n@@ -87,14 +87,15 @@\n self = args[0]\n assert isinstance(self, DOMNode)\n \n- positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n- keyword_arguments = \", \".join(\n- f\"{name}={value!r}\" for name, value in kwargs.items()\n- )\n- tokens = [positional_arguments, keyword_arguments]\n- worker_description = (\n- f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n- )\n+ try:\n+ positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n+ keyword_arguments = \", \".join(\n+ f\"{name}={value!r}\" for name, value in kwargs.items()\n+ )\n+ tokens = [positional_arguments, keyword_arguments]\n+ worker_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n+ except Exception:\n+ worker_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n", "issue": "@work does not catch exceptions raised by calling repr on function's args\nConsider this app, which calls the decorated `_job`, passing in an object that doesn't implement `repr`:\r\n\r\n```python\r\nfrom textual import work\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.worker import WorkerFailed\r\nfrom textual.widgets import Static\r\n\r\nclass NoRepr():\r\n def __repr__(self) -> str:\r\n raise NotImplementedError(\"Don't call repr!\")\r\n\r\nclass Repro(App):\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Static(\"hi\")\r\n\r\n async def on_mount(self) -> None:\r\n foo = NoRepr()\r\n try:\r\n worker = self._job(foo)\r\n await worker.wait()\r\n except WorkerFailed as e:\r\n static = self.query_one(Static)\r\n static.renderable = \"caught\"\r\n\r\n @work(exclusive=False, exit_on_error=False)\r\n def _job(self, foo: NoRepr) -> None:\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n app = Repro()\r\n app.run()\r\n```\r\n\r\nDespite the try/catch block, this app crashes with this trace:\r\n\r\n```\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 /home/tco/open/textual-bug-repro/repro.py:19 in on_mount \u2502\r\n\u2502 \u2502\r\n\u2502 16 \u2502 async def on_mount(self) -> None: \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 17 \u2502 \u2502 foo = NoRepr() \u2502 foo = <repr-error \"Don't call repr!\"> \u2502 \u2502\r\n\u2502 18 \u2502 \u2502 try: \u2502 self = Repro(title='Repro', classes={'-dark-mode'}) \u2502 \u2502\r\n\u2502 \u2771 19 \u2502 \u2502 \u2502 worker = self._job(foo) 
\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 20 \u2502 \u2502 \u2502 await worker.wait() \u2502\r\n\u2502 21 \u2502 \u2502 except WorkerFailed as e: \u2502\r\n\u2502 22 \u2502 \u2502 \u2502 static = self.query_one(Static) \u2502\r\n\u2502 \u2502\r\n\u2502 /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/lib/python3.8/site-packages/textual/_work_decorator.py:90 in decorated \u2502\r\n\u2502 \u2502\r\n\u2502 87 \u2502 \u2502 \u2502 self = args[0] \u2502\r\n\u2502 88 \u2502 \u2502 \u2502 assert isinstance(self, DOMNode) \u2502\r\n\u2502 89 \u2502 \u2502 \u2502 \u2502\r\n\u2502 \u2771 90 \u2502 \u2502 \u2502 positional_arguments = \", \".join(repr(arg) for arg in args[1:]) \u2502\r\n\u2502 91 \u2502 \u2502 \u2502 keyword_arguments = \", \".join( \u2502\r\n\u2502 92 \u2502 \u2502 \u2502 \u2502 f\"{name}={value!r}\" for name, value in kwargs.items() \u2502\r\n\u2502 93 \u2502 \u2502 \u2502 ) \u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 args = (Repro(title='Repro', classes={'-dark-mode'}), <repr-error \"Don't call repr!\">) \u2502 \u2502\r\n\u2502 \u2502 DOMNode = <class 'textual.dom.DOMNode'> \u2502 \u2502\r\n\u2502 \u2502 exclusive = False \u2502 \u2502\r\n\u2502 \u2502 exit_on_error = False \u2502 \u2502\r\n\u2502 \u2502 group = 'default' \u2502 \u2502\r\n\u2502 \u2502 kwargs = {} \u2502 \u2502\r\n\u2502 \u2502 method = <function Repro._job at 0x7f76d4e9ac10> \u2502 \u2502\r\n\u2502 \u2502 name = '' \u2502 \u2502\r\n\u2502 \u2502 self = Repro(title='Repro', classes={'-dark-mode'}) \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2502\r\n\u2502 /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/lib/python3.8/site-packages/textual/_work_decorator.py:90 in <genexpr> \u2502\r\n\u2502 \u2502\r\n\u2502 87 \u2502 \u2502 \u2502 self = args[0] \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 88 
\u2502 \u2502 \u2502 assert isinstance(self, DOMNode) \u2502 .0 = <tuple_iterator object at 0x7f76cdb42610> \u2502 \u2502\r\n\u2502 89 \u2502 \u2502 \u2502 \u2502 arg = <repr-error \"Don't call repr!\"> \u2502 \u2502\r\n\u2502 \u2771 90 \u2502 \u2502 \u2502 positional_arguments = \", \".join(repr(arg) for arg in args[1:]) \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 91 \u2502 \u2502 \u2502 keyword_arguments = \", \".join( \u2502\r\n\u2502 92 \u2502 \u2502 \u2502 \u2502 f\"{name}={value!r}\" for name, value in kwargs.items() \u2502\r\n\u2502 93 \u2502 \u2502 \u2502 ) \u2502\r\n\u2502 \u2502\r\n\u2502 /home/tco/open/textual-bug-repro/repro.py:9 in __repr__ \u2502\r\n\u2502 \u2502\r\n\u2502 6 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 7 class NoRepr(): \u2502 self = <repr-error \"Don't call repr!\"> \u2502 \u2502\r\n\u2502 8 \u2502 def __repr__(self) -> str: \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2502 \u2771 9 \u2502 \u2502 raise NotImplementedError(\"Don't call repr!\") \u2502\r\n\u2502 10 \u2502\r\n\u2502 11 class Repro(App): \u2502\r\n\u2502 12 \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nNotImplementedError: Don't call repr!\r\n```\r\n\r\nThis might seem trivial, but this bit me [here](https://github.com/tconbeer/harlequin/issues/56), since DuckDB's DuckDBPyRelations raise errors on `repr` for some queries:\r\n\r\n```python\r\n>>> import duckdb\r\n>>> conn = duckdb.connect(\":memory:\")\r\n>>> rel = conn.sql(\"select 0::struct(a int)\")\r\n>>> repr(rel)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nduckdb.ConversionException: Conversion Error: Unimplemented type for cast (INTEGER -> STRUCT(a INTEGER))\r\n```\r\n\r\nConsequentially, this app, which attempts to fetch data from a DuckDBPyRelation in a @work-decorated function, will crash on some queries unless an additional `except` block is added to catch the DuckDB errors:\r\n\r\n```python\r\nimport duckdb\r\nfrom textual import 
work\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.worker import WorkerFailed, get_current_worker\r\nfrom textual.widgets import Static\r\n\r\nclass Repro(App):\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Static(\"hi\")\r\n\r\n async def on_mount(self) -> None:\r\n conn = duckdb.connect(\":memory:\")\r\n rel = conn.sql(\"select 0::struct(a int)\")\r\n try:\r\n worker = self.fetch_relation_data(rel)\r\n await worker.wait()\r\n except WorkerFailed as e:\r\n static = self.query_one(Static)\r\n static.renderable = f\"WorkerFailed: {e.error}\"\r\n # uncomment to stop crashes\r\n # except duckdb.Error as e:\r\n # static = self.query_one(Static)\r\n # static.renderable = f\"DuckDB Error: {e}\"\r\n\r\n def set_data(self, data) -> None:\r\n self.data = data\r\n\r\n @work(exclusive=True, exit_on_error=False) # type: ignore\r\n def fetch_relation_data(self, relation: duckdb.DuckDBPyRelation) -> None:\r\n data = relation.fetchall()\r\n worker = get_current_worker()\r\n if not worker.is_cancelled:\r\n self.call_from_thread(self.set_data, data)\r\n\r\nif __name__ == \"__main__\":\r\n app = Repro()\r\n app.run()\r\n```\r\n\r\n# Textual Diagnostics\r\n\r\n## Versions\r\n\r\n| Name | Value |\r\n|---------|--------|\r\n| Textual | 0.24.1 |\r\n| Rich | 13.3.5 |\r\n\r\n## Python\r\n\r\n| Name | Value |\r\n|----------------|-----------------------------------------------------------------------------------|\r\n| Version | 3.8.10 |\r\n| Implementation | CPython |\r\n| Compiler | GCC 9.3.0 |\r\n| Executable | /home/tco/.cache/pypoetry/virtualenvs/textual-bug-repro-JJcVc8Rx-py3.8/bin/python |\r\n\r\n## Operating System\r\n\r\n| Name | Value |\r\n|---------|-------------------------------------|\r\n| System | Linux |\r\n| Release | 5.10.60.1-microsoft-standard-WSL2 |\r\n| Version | #1 SMP Wed Aug 25 23:20:18 UTC 2021 |\r\n\r\n## Terminal\r\n\r\n| Name | Value |\r\n|----------------------|------------------|\r\n| Terminal Application | Windows Terminal |\r\n| TERM | xterm-256color |\r\n| COLORTERM | *Not set* |\r\n| FORCE_COLOR | *Not set* |\r\n| NO_COLOR | *Not set* |\r\n\r\n## Rich Console options\r\n\r\n| Name | Value |\r\n|----------------|----------------------|\r\n| size | width=156, height=40 |\r\n| legacy_windows | False |\r\n| min_width | 1 |\r\n| max_width | 156 |\r\n| is_terminal | True |\r\n| encoding | utf-8 |\r\n| max_height | 40 |\r\n| justify | None |\r\n| overflow | None |\r\n| no_wrap | False |\r\n| highlight | None |\r\n| markup | None |\r\n| height | None |\n", "before_files": [{"content": "\"\"\"\n\nA decorator used to create [workers](/guide/workers).\n\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom functools import partial, wraps\nfrom typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload\n\nfrom typing_extensions import ParamSpec, TypeAlias\n\nif TYPE_CHECKING:\n from .worker import Worker\n\n\nFactoryParamSpec = ParamSpec(\"FactoryParamSpec\")\nDecoratorParamSpec = ParamSpec(\"DecoratorParamSpec\")\nReturnType = TypeVar(\"ReturnType\")\n\nDecorator: TypeAlias = Callable[\n [\n Union[\n Callable[DecoratorParamSpec, ReturnType],\n Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],\n ]\n ],\n Callable[DecoratorParamSpec, \"Worker[ReturnType]\"],\n]\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n) -> Callable[FactoryParamSpec, 
\"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:\n ...\n\n\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n | None = None,\n *,\n name: str = \"\",\n group: str = \"default\",\n exit_on_error: bool = True,\n exclusive: bool = False,\n) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n\n Args:\n method: A function or coroutine.\n name: A short string to identify the worker (in logs and debugging).\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n \"\"\"\n\n def decorator(\n method: (\n Callable[DecoratorParamSpec, ReturnType]\n | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]\n )\n ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:\n \"\"\"The decorator.\"\"\"\n\n @wraps(method)\n def decorated(\n *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs\n ) -> Worker[ReturnType]:\n \"\"\"The replaced callable.\"\"\"\n from .dom import DOMNode\n\n self = args[0]\n assert isinstance(self, DOMNode)\n\n positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n keyword_arguments = \", \".join(\n f\"{name}={value!r}\" for name, value in kwargs.items()\n )\n tokens = [positional_arguments, keyword_arguments]\n worker_description = (\n f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n )\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n description=worker_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n )\n return worker\n\n return decorated\n\n if method is None:\n return decorator\n else:\n return decorator(method)\n", "path": "src/textual/_work_decorator.py"}]}
| 3,830 | 283 |
gh_patches_debug_30057
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-259
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws help does not work
Hi,
I've just installed aws-cli using pip on an Ubuntu 13.04 amd64 machine and I'm unable to use the help command. I got the following errors:
```
$ aws ec2 help
[Errno 2] No such file or directory
grotty:<standard input>:2883:fatal error: output error
```
</issue>
<code>
[start of awscli/help.py]
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14 import logging
15 import os
16 import platform
17 from subprocess import Popen, PIPE
18
19 from docutils.core import publish_string
20 import bcdoc
21 from bcdoc.clidocs import ReSTDocument
22 from bcdoc.clidocs import ProviderDocumentEventHandler
23 from bcdoc.clidocs import ServiceDocumentEventHandler
24 from bcdoc.clidocs import OperationDocumentEventHandler
25 import bcdoc.clidocevents
26 from bcdoc.textwriter import TextWriter
27
28 from awscli.argprocess import ParamShorthand
29
30
31 LOG = logging.getLogger('awscli.help')
32
33
34 def get_renderer():
35 """
36 Return the appropriate HelpRenderer implementation for the
37 current platform.
38 """
39 if platform.system() == 'Windows':
40 return WindowsHelpRenderer()
41 else:
42 return PosixHelpRenderer()
43
44
45 class HelpRenderer(object):
46 """
47 Interface for a help renderer.
48
49 The renderer is responsible for displaying the help content on
50 a particular platform.
51 """
52
53 def render(self, contents):
54 """
55 Each implementation of HelpRenderer must implement this
56 render method.
57 """
58 pass
59
60
61 class PosixHelpRenderer(HelpRenderer):
62 """
63 Render help content on a Posix-like system. This includes
64 Linux and MacOS X.
65 """
66
67 PAGER = 'more'
68
69 def get_pager(self):
70 pager = self.PAGER
71 if 'MANPAGER' in os.environ:
72 pager = os.environ['MANPAGER']
73 elif 'PAGER' in os.environ:
74 pager = os.environ['PAGER']
75 return pager
76
77 def render(self, contents):
78 cmdline = ['rst2man.py']
79 LOG.debug("Running command: %s", cmdline)
80 p2 = Popen(cmdline, stdin=PIPE, stdout=PIPE)
81 p2.stdin.write(contents)
82 p2.stdin.close()
83 cmdline = ['groff', '-man', '-T', 'ascii']
84 LOG.debug("Running command: %s", cmdline)
85 p3 = Popen(cmdline, stdin=p2.stdout, stdout=PIPE)
86 pager = self.get_pager()
87 cmdline = [pager]
88 LOG.debug("Running command: %s", cmdline)
89 p4 = Popen(cmdline, stdin=p3.stdout)
90 p4.communicate()
91 sys.exit(1)
92
93
94 class WindowsHelpRenderer(HelpRenderer):
95 """
96 Render help content on a Windows platform.
97 """
98
99 def render(self, contents):
100 text_output = publish_string(contents,
101 writer=TextWriter())
102 sys.stdout.write(text_output.decode('utf-8'))
103 sys.exit(1)
104
105
106 class RawRenderer(HelpRenderer):
107 """
108 Render help as the raw ReST document.
109 """
110
111 def render(self, contents):
112 sys.stdout.write(contents)
113 sys.exit(1)
114
115
116 class HelpCommand(object):
117 """
118 HelpCommand Interface
119 ---------------------
120 A HelpCommand object acts as the interface between objects in the
121 CLI (e.g. Providers, Services, Operations, etc.) and the documentation
122 system (bcdoc).
123
124 A HelpCommand object wraps the object from the CLI space and provides
125 a consistent interface to critical information needed by the
126 documentation pipeline such as the object's name, description, etc.
127
128 The HelpCommand object is passed to the component of the
129 documentation pipeline that fires documentation events. It is
130 then passed on to each document event handler that has registered
131 for the events.
132
133 All HelpCommand objects contain the following attributes:
134
135 + ``session`` - A ``botocore`` ``Session`` object.
136 + ``obj`` - The object that is being documented.
137 + ``command_table`` - A dict mapping command names to
138 callable objects.
139 + ``arg_table`` - A dict mapping argument names to callable objects.
140 + ``doc`` - A ``Document`` object that is used to collect the
141 generated documentation.
142
143 In addition, please note the `properties` defined below which are
144 required to allow the object to be used in the document pipeline.
145
146 Implementations of HelpCommand are provided here for Provider,
147 Service and Operation objects. Other implementations for other
148 types of objects might be needed for customization in plugins.
149 As long as the implementations conform to this basic interface
150 it should be possible to pass them to the documentation system
151 and generate interactive and static help files.
152 """
153
154 EventHandlerClass = None
155 """
156 Each subclass should define this class variable to point to the
157 EventHandler class used by this HelpCommand.
158 """
159
160 def __init__(self, session, obj, command_table, arg_table):
161 self.session = session
162 self.obj = obj
163 self.command_table = command_table
164 self.arg_table = arg_table
165 self.renderer = get_renderer()
166 self.doc = ReSTDocument(target='man')
167
168 @property
169 def event_class(self):
170 """
171 Return the ``event_class`` for this object.
172
173 The ``event_class`` is used by the documentation pipeline
174 when generating documentation events. For the event below::
175
176 doc-title.<event_class>.<name>
177
178 The document pipeline would use this property to determine
179 the ``event_class`` value.
180 """
181 pass
182
183 @property
184 def name(self):
185 """
186 Return the name of the wrapped object.
187
188 This would be called by the document pipeline to determine
189 the ``name`` to be inserted into the event, as shown above.
190 """
191 pass
192
193 def __call__(self, args, parsed_globals):
194 # Create an event handler for a Provider Document
195 self.EventHandlerClass(self)
196 # Now generate all of the events for a Provider document.
197 # We pass ourselves along so that we can, in turn, get passed
198 # to all event handlers.
199 bcdoc.clidocevents.generate_events(self.session, self)
200 self.renderer.render(self.doc.getvalue())
201
202
203
204 class ProviderHelpCommand(HelpCommand):
205 """Implements top level help command.
206
207 This is what is called when ``aws help`` is run.
208
209 """
210 EventHandlerClass = ProviderDocumentEventHandler
211
212 def __init__(self, session, command_table, arg_table,
213 description, synopsis, usage):
214 HelpCommand.__init__(self, session, session.provider,
215 command_table, arg_table)
216 self.description = description
217 self.synopsis = synopsis
218 self.help_usage = usage
219
220
221 @property
222 def event_class(self):
223 return 'Provider'
224
225 @property
226 def name(self):
227 return self.obj.name
228
229
230 class ServiceHelpCommand(HelpCommand):
231 """Implements service level help.
232
233 This is the object invoked whenever a service command
234 help is implemented, e.g. ``aws ec2 help``.
235
236 """
237
238 EventHandlerClass = ServiceDocumentEventHandler
239
240 def __init__(self, session, obj, command_table, arg_table, name,
241 event_class):
242 super(ServiceHelpCommand, self).__init__(session, obj, command_table,
243 arg_table)
244 self._name = name
245 self._event_class = event_class
246
247 @property
248 def event_class(self):
249 return self._event_class
250
251 @property
252 def name(self):
253 return self._name
254
255
256 class OperationHelpCommand(HelpCommand):
257 """Implements operation level help.
258
259 This is the object invoked whenever help for a service is requested,
260 e.g. ``aws ec2 describe-instances help``.
261
262 """
263 EventHandlerClass = OperationDocumentEventHandler
264
265 def __init__(self, session, service, operation, arg_table, name,
266 event_class):
267 HelpCommand.__init__(self, session, operation, None, arg_table)
268 self.service = service
269 self.param_shorthand = ParamShorthand()
270 self._name = name
271 self._event_class = event_class
272
273 @property
274 def event_class(self):
275 return self._event_class
276
277 @property
278 def name(self):
279 return self._name
280
[end of awscli/help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/help.py b/awscli/help.py
--- a/awscli/help.py
+++ b/awscli/help.py
@@ -66,13 +66,13 @@
PAGER = 'more'
- def get_pager(self):
+ def get_pager_cmdline(self):
pager = self.PAGER
if 'MANPAGER' in os.environ:
pager = os.environ['MANPAGER']
elif 'PAGER' in os.environ:
pager = os.environ['PAGER']
- return pager
+ return pager.split()
def render(self, contents):
cmdline = ['rst2man.py']
@@ -83,8 +83,7 @@
cmdline = ['groff', '-man', '-T', 'ascii']
LOG.debug("Running command: %s", cmdline)
p3 = Popen(cmdline, stdin=p2.stdout, stdout=PIPE)
- pager = self.get_pager()
- cmdline = [pager]
+ cmdline = self.get_pager_cmdline()
LOG.debug("Running command: %s", cmdline)
p4 = Popen(cmdline, stdin=p3.stdout)
p4.communicate()
@@ -200,7 +199,6 @@
self.renderer.render(self.doc.getvalue())
-
class ProviderHelpCommand(HelpCommand):
"""Implements top level help command.
@@ -217,7 +215,6 @@
self.synopsis = synopsis
self.help_usage = usage
-
@property
def event_class(self):
return 'Provider'
|
{"golden_diff": "diff --git a/awscli/help.py b/awscli/help.py\n--- a/awscli/help.py\n+++ b/awscli/help.py\n@@ -66,13 +66,13 @@\n \n PAGER = 'more'\n \n- def get_pager(self):\n+ def get_pager_cmdline(self):\n pager = self.PAGER\n if 'MANPAGER' in os.environ:\n pager = os.environ['MANPAGER']\n elif 'PAGER' in os.environ:\n pager = os.environ['PAGER']\n- return pager\n+ return pager.split()\n \n def render(self, contents):\n cmdline = ['rst2man.py']\n@@ -83,8 +83,7 @@\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n p3 = Popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n- pager = self.get_pager()\n- cmdline = [pager]\n+ cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n p4 = Popen(cmdline, stdin=p3.stdout)\n p4.communicate()\n@@ -200,7 +199,6 @@\n self.renderer.render(self.doc.getvalue())\n \n \n-\n class ProviderHelpCommand(HelpCommand):\n \"\"\"Implements top level help command.\n \n@@ -217,7 +215,6 @@\n self.synopsis = synopsis\n self.help_usage = usage\n \n-\n @property\n def event_class(self):\n return 'Provider'\n", "issue": "aws help does not work\nHi,\n\nI've just installed aws-cli using pip on a Ubuntu 13.04 amd64 and I'm unable to use the help command. I got the following errors:\n\n```\n$ aws ec2 help\n[Errno 2] No such file or directory\ngrotty:<standard input>:2883:fatal error: output error\n```\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport logging\nimport os\nimport platform\nfrom subprocess import Popen, PIPE\n\nfrom docutils.core import publish_string\nimport bcdoc\nfrom bcdoc.clidocs import ReSTDocument\nfrom bcdoc.clidocs import ProviderDocumentEventHandler\nfrom bcdoc.clidocs import ServiceDocumentEventHandler\nfrom bcdoc.clidocs import OperationDocumentEventHandler\nimport bcdoc.clidocevents\nfrom bcdoc.textwriter import TextWriter\n\nfrom awscli.argprocess import ParamShorthand\n\n\nLOG = logging.getLogger('awscli.help')\n\n\ndef get_renderer():\n \"\"\"\n Return the appropriate HelpRenderer implementation for the\n current platform.\n \"\"\"\n if platform.system() == 'Windows':\n return WindowsHelpRenderer()\n else:\n return PosixHelpRenderer()\n\n\nclass HelpRenderer(object):\n \"\"\"\n Interface for a help renderer.\n\n The renderer is responsible for displaying the help content on\n a particular platform.\n \"\"\"\n\n def render(self, contents):\n \"\"\"\n Each implementation of HelpRenderer must implement this\n render method.\n \"\"\"\n pass\n\n\nclass PosixHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Posix-like system. 
This includes\n Linux and MacOS X.\n \"\"\"\n\n PAGER = 'more'\n\n def get_pager(self):\n pager = self.PAGER\n if 'MANPAGER' in os.environ:\n pager = os.environ['MANPAGER']\n elif 'PAGER' in os.environ:\n pager = os.environ['PAGER']\n return pager\n\n def render(self, contents):\n cmdline = ['rst2man.py']\n LOG.debug(\"Running command: %s\", cmdline)\n p2 = Popen(cmdline, stdin=PIPE, stdout=PIPE)\n p2.stdin.write(contents)\n p2.stdin.close()\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n p3 = Popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n pager = self.get_pager()\n cmdline = [pager]\n LOG.debug(\"Running command: %s\", cmdline)\n p4 = Popen(cmdline, stdin=p3.stdout)\n p4.communicate()\n sys.exit(1)\n\n\nclass WindowsHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Windows platform.\n \"\"\"\n\n def render(self, contents):\n text_output = publish_string(contents,\n writer=TextWriter())\n sys.stdout.write(text_output.decode('utf-8'))\n sys.exit(1)\n\n\nclass RawRenderer(HelpRenderer):\n \"\"\"\n Render help as the raw ReST document.\n \"\"\"\n\n def render(self, contents):\n sys.stdout.write(contents)\n sys.exit(1)\n\n\nclass HelpCommand(object):\n \"\"\"\n HelpCommand Interface\n ---------------------\n A HelpCommand object acts as the interface between objects in the\n CLI (e.g. Providers, Services, Operations, etc.) and the documentation\n system (bcdoc).\n\n A HelpCommand object wraps the object from the CLI space and provides\n a consistent interface to critical information needed by the\n documentation pipeline such as the object's name, description, etc.\n\n The HelpCommand object is passed to the component of the\n documentation pipeline that fires documentation events. It is\n then passed on to each document event handler that has registered\n for the events.\n\n All HelpCommand objects contain the following attributes:\n\n + ``session`` - A ``botocore`` ``Session`` object.\n + ``obj`` - The object that is being documented.\n + ``command_table`` - A dict mapping command names to\n callable objects.\n + ``arg_table`` - A dict mapping argument names to callable objects.\n + ``doc`` - A ``Document`` object that is used to collect the\n generated documentation.\n\n In addition, please note the `properties` defined below which are\n required to allow the object to be used in the document pipeline.\n\n Implementations of HelpCommand are provided here for Provider,\n Service and Operation objects. Other implementations for other\n types of objects might be needed for customization in plugins.\n As long as the implementations conform to this basic interface\n it should be possible to pass them to the documentation system\n and generate interactive and static help files.\n \"\"\"\n\n EventHandlerClass = None\n \"\"\"\n Each subclass should define this class variable to point to the\n EventHandler class used by this HelpCommand.\n \"\"\"\n\n def __init__(self, session, obj, command_table, arg_table):\n self.session = session\n self.obj = obj\n self.command_table = command_table\n self.arg_table = arg_table\n self.renderer = get_renderer()\n self.doc = ReSTDocument(target='man')\n\n @property\n def event_class(self):\n \"\"\"\n Return the ``event_class`` for this object.\n\n The ``event_class`` is used by the documentation pipeline\n when generating documentation events. 
For the event below::\n\n doc-title.<event_class>.<name>\n\n The document pipeline would use this property to determine\n the ``event_class`` value.\n \"\"\"\n pass\n\n @property\n def name(self):\n \"\"\"\n Return the name of the wrapped object.\n\n This would be called by the document pipeline to determine\n the ``name`` to be inserted into the event, as shown above.\n \"\"\"\n pass\n\n def __call__(self, args, parsed_globals):\n # Create an event handler for a Provider Document\n self.EventHandlerClass(self)\n # Now generate all of the events for a Provider document.\n # We pass ourselves along so that we can, in turn, get passed\n # to all event handlers.\n bcdoc.clidocevents.generate_events(self.session, self)\n self.renderer.render(self.doc.getvalue())\n\n\n\nclass ProviderHelpCommand(HelpCommand):\n \"\"\"Implements top level help command.\n\n This is what is called when ``aws help`` is run.\n\n \"\"\"\n EventHandlerClass = ProviderDocumentEventHandler\n\n def __init__(self, session, command_table, arg_table,\n description, synopsis, usage):\n HelpCommand.__init__(self, session, session.provider,\n command_table, arg_table)\n self.description = description\n self.synopsis = synopsis\n self.help_usage = usage\n\n\n @property\n def event_class(self):\n return 'Provider'\n\n @property\n def name(self):\n return self.obj.name\n\n\nclass ServiceHelpCommand(HelpCommand):\n \"\"\"Implements service level help.\n\n This is the object invoked whenever a service command\n help is implemented, e.g. ``aws ec2 help``.\n\n \"\"\"\n\n EventHandlerClass = ServiceDocumentEventHandler\n\n def __init__(self, session, obj, command_table, arg_table, name,\n event_class):\n super(ServiceHelpCommand, self).__init__(session, obj, command_table,\n arg_table)\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n\n\nclass OperationHelpCommand(HelpCommand):\n \"\"\"Implements operation level help.\n\n This is the object invoked whenever help for a service is requested,\n e.g. ``aws ec2 describe-instances help``.\n\n \"\"\"\n EventHandlerClass = OperationDocumentEventHandler\n\n def __init__(self, session, service, operation, arg_table, name,\n event_class):\n HelpCommand.__init__(self, session, operation, None, arg_table)\n self.service = service\n self.param_shorthand = ParamShorthand()\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n", "path": "awscli/help.py"}]}
| 3,219 | 347 |
gh_patches_debug_40054
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-435
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiple issues with write_geotiff() method
### Expected behaviour
I should be able to write a small GeoTiff file using the `datacube.helpers.write_geotiff()` method.
I should also not have to specify a `time_index`.
### Actual behaviour
For a 50x50 raster it fails with:
```python
ValueError: blockxsize exceeds raster width
```
And if `time_index` is not specified it fails with:
```pytb
Exception ignored in: 'pandas._libs.lib.is_bool_array'
TypeError: len() of unsized object
Traceback (most recent call last):
File "testlanwei.py", line 30, in <module>
write_geotiff(filename='H8_test_1.tif', dataset=data, profile_override={'blockxsize':16, 'blockysize':16})
File "/home/547/dra547/github/datacube-core/datacube/helpers.py", line 48, in write_geotiff
dest.write(data.isel(time=time_index).data, bandnum)
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/dataarray.py", line 679, in isel
ds = self._to_temp_dataset().isel(drop=drop, **indexers)
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/dataset.py", line 1143, in isel
new_var = var.isel(**var_indexers)
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/variable.py", line 570, in isel
return self[tuple(key)]
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/variable.py", line 1225, in __getitem__
values = self._indexable_data[key]
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/indexing.py", line 545, in __getitem__
result = self.array[key]
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py", line 274, in __getitem__
if com.is_bool_indexer(key):
File "/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/pandas/core/common.py", line 189, in is_bool_indexer
raise ValueError('cannot index with vector containing '
ValueError: cannot index with vector containing NA / NaN values
```
The function should be changed to take more useful parameters, and should have unit test coverage.
### Steps to reproduce the behaviour
Run the following script [broken_geotiff_write.txt](https://github.com/opendatacube/datacube-core/files/1204141/broken_geotiff_write.txt).
### Environment information
* Which ``datacube --version`` are you using?
`1.5.1`
* What datacube deployment/environment are you running against?
`agdc-py3-prod/1.5.1`
</issue>
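
Since the attached script is not reproduced inline, here is a minimal sketch of the failing call, reconstructed from the traceback above. The `dc.load(...)` arguments are placeholders (an initialised datacube with a small indexed raster is assumed); only the `write_geotiff(...)` call is taken from the traceback.

```python
# Hypothetical reproduction sketch -- product name and extent are placeholders.
import datacube
from datacube.helpers import write_geotiff

dc = datacube.Datacube()
data = dc.load(product='some_product', x=(149.00, 149.01), y=(-35.01, -35.00))

# With the default 256x256 profile this raises
#   ValueError: blockxsize exceeds raster width
# for a ~50x50 raster; with the override below it instead hits the
# time_index/pandas error shown in the traceback above.
write_geotiff(filename='H8_test_1.tif', dataset=data,
              profile_override={'blockxsize': 16, 'blockysize': 16})
```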
<code>
[start of datacube/helpers.py]
1 """
2 Useful functions for Datacube users
3
4 Not used internally, those should go in `utils.py`
5 """
6 from __future__ import absolute_import
7 import rasterio
8 import numpy as np
9
10 DEFAULT_PROFILE = {
11 'blockxsize': 256,
12 'blockysize': 256,
13 'compress': 'lzw',
14 'driver': 'GTiff',
15 'interleave': 'band',
16 'nodata': 0.0,
17 'photometric': 'RGBA',
18 'tiled': True}
19
20
21 def write_geotiff(filename, dataset, time_index=None, profile_override=None):
22 """
23 Write an xarray dataset to a geotiff
24
25 :param filename: Output filename
26 :attr dataset: xarray dataset containing multiple bands to write to file
27 :attr time_index: time index to write to file
28 :attr profile_override: option dict, overrides rasterio file creation options.
29 """
30 profile_override = profile_override or {}
31
32 try:
33 dtypes = {val.dtype for val in dataset.data_vars.values()}
34 assert len(dtypes) == 1 # Check for multiple dtypes
35 except AttributeError:
36 dtypes = [dataset.dtype]
37
38 profile = DEFAULT_PROFILE.copy()
39 profile.update({
40 'width': dataset.dims[dataset.crs.dimensions[1]],
41 'height': dataset.dims[dataset.crs.dimensions[0]],
42 'affine': dataset.affine,
43 'crs': dataset.crs.crs_str,
44 'count': len(dataset.data_vars),
45 'dtype': str(dtypes.pop())
46 })
47 profile.update(profile_override)
48
49 with rasterio.open(str(filename), 'w', **profile) as dest:
50 if hasattr(dataset, 'data_vars'):
51 for bandnum, data in enumerate(dataset.data_vars.values(), start=1):
52 dest.write(data.isel(time=time_index).data, bandnum)
53 else: # Assume that we have a DataArray
54 dest.write(dataset.isel(time=time_index).data, 0)
55
56
57 def ga_pq_fuser(dest, src):
58 """
59 Fuse two Geoscience Australia Pixel Quality ndarrays
60
61 To be used as a `fuse_func` when loaded `grouped` data, for example when grouping
62 by solar day to avoid duplicate data from scene overlaps.
63 """
64 valid_bit = 8
65 valid_val = (1 << valid_bit)
66
67 no_data_dest_mask = ~(dest & valid_val).astype(bool)
68 np.copyto(dest, src, where=no_data_dest_mask)
69
70 both_data_mask = (valid_val & dest & src).astype(bool)
71 np.copyto(dest, src & dest, where=both_data_mask)
72
[end of datacube/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/datacube/helpers.py b/datacube/helpers.py
--- a/datacube/helpers.py
+++ b/datacube/helpers.py
@@ -4,8 +4,9 @@
Not used internally, those should go in `utils.py`
"""
from __future__ import absolute_import
-import rasterio
+
import numpy as np
+import rasterio
DEFAULT_PROFILE = {
'blockxsize': 256,
@@ -18,17 +19,22 @@
'tiled': True}
-def write_geotiff(filename, dataset, time_index=None, profile_override=None):
+def write_geotiff(filename, dataset, profile_override=None, time_index=None):
"""
- Write an xarray dataset to a geotiff
+ Write an ODC style xarray.Dataset to a GeoTIFF file.
:param filename: Output filename
- :attr dataset: xarray dataset containing multiple bands to write to file
- :attr time_index: time index to write to file
- :attr profile_override: option dict, overrides rasterio file creation options.
+ :param dataset: xarray dataset containing one or more bands to write to a file.
+ :param profile_override: option dict, overrides rasterio file creation options.
+ :param time_index: DEPRECATED
"""
profile_override = profile_override or {}
+ if time_index is not None:
+ raise ValueError('''The write_geotiff function no longer supports passing in `time_index`.
+ The same function can be achieved by calling `dataset.isel(time=<time_index>)` before passing
+ in your dataset. It was removed because it made the function much less useful for more advanced cases.''')
+
try:
dtypes = {val.dtype for val in dataset.data_vars.values()}
assert len(dtypes) == 1 # Check for multiple dtypes
@@ -39,19 +45,35 @@
profile.update({
'width': dataset.dims[dataset.crs.dimensions[1]],
'height': dataset.dims[dataset.crs.dimensions[0]],
- 'affine': dataset.affine,
+ 'transform': dataset.affine,
'crs': dataset.crs.crs_str,
'count': len(dataset.data_vars),
'dtype': str(dtypes.pop())
})
profile.update(profile_override)
+ _calculate_blocksize(profile)
+
with rasterio.open(str(filename), 'w', **profile) as dest:
if hasattr(dataset, 'data_vars'):
for bandnum, data in enumerate(dataset.data_vars.values(), start=1):
- dest.write(data.isel(time=time_index).data, bandnum)
- else: # Assume that we have a DataArray
- dest.write(dataset.isel(time=time_index).data, 0)
+ dest.write(data.data, bandnum)
+
+
+def _calculate_blocksize(profile):
+ # Block size must be smaller than the image size, and for geotiffs must be divisible by 16
+ # Fix for small images.
+ if profile['blockxsize'] > profile['width']:
+ if profile['width'] % 16 == 0 or profile['width'] < 16:
+ profile['blockxsize'] = profile['width']
+ else:
+ profile['blockxsize'] = 16
+
+ if profile['blockysize'] > profile['height']:
+ if profile['height'] % 16 == 0 or profile['height'] < 16:
+ profile['blockysize'] = profile['height']
+ else:
+ profile['blockysize'] = 16
def ga_pq_fuser(dest, src):
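
A brief, hedged usage note on the patched interface: callers now select the time slice themselves before writing, and small rasters no longer need a manual block-size override because `_calculate_blocksize()` clamps the block dimensions. Here `data` is assumed to be an ODC-style `xarray.Dataset` returned by `Datacube.load(...)`:

```python
# After the patch: select a time slice first, then write.
from datacube.helpers import write_geotiff

# data = dc.load(...)  # as in the reproduction sketch earlier (placeholder)
write_geotiff(filename='H8_test_1.tif', dataset=data.isel(time=0))
# Passing time_index= now raises a ValueError pointing at .isel(time=...),
# and block sizes are shrunk automatically for rasters narrower than the
# 256-pixel default, so profile_override={'blockxsize': 16, ...} is no
# longer required just to write a small GeoTIFF.
```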
|
{"golden_diff": "diff --git a/datacube/helpers.py b/datacube/helpers.py\n--- a/datacube/helpers.py\n+++ b/datacube/helpers.py\n@@ -4,8 +4,9 @@\n Not used internally, those should go in `utils.py`\n \"\"\"\n from __future__ import absolute_import\n-import rasterio\n+\n import numpy as np\n+import rasterio\n \n DEFAULT_PROFILE = {\n 'blockxsize': 256,\n@@ -18,17 +19,22 @@\n 'tiled': True}\n \n \n-def write_geotiff(filename, dataset, time_index=None, profile_override=None):\n+def write_geotiff(filename, dataset, profile_override=None, time_index=None):\n \"\"\"\n- Write an xarray dataset to a geotiff\n+ Write an ODC style xarray.Dataset to a GeoTIFF file.\n \n :param filename: Output filename\n- :attr dataset: xarray dataset containing multiple bands to write to file\n- :attr time_index: time index to write to file\n- :attr profile_override: option dict, overrides rasterio file creation options.\n+ :param dataset: xarray dataset containing one or more bands to write to a file.\n+ :param profile_override: option dict, overrides rasterio file creation options.\n+ :param time_index: DEPRECATED\n \"\"\"\n profile_override = profile_override or {}\n \n+ if time_index is not None:\n+ raise ValueError('''The write_geotiff function no longer supports passing in `time_index`.\n+ The same function can be achieved by calling `dataset.isel(time=<time_index>)` before passing\n+ in your dataset. It was removed because it made the function much less useful for more advanced cases.''')\n+\n try:\n dtypes = {val.dtype for val in dataset.data_vars.values()}\n assert len(dtypes) == 1 # Check for multiple dtypes\n@@ -39,19 +45,35 @@\n profile.update({\n 'width': dataset.dims[dataset.crs.dimensions[1]],\n 'height': dataset.dims[dataset.crs.dimensions[0]],\n- 'affine': dataset.affine,\n+ 'transform': dataset.affine,\n 'crs': dataset.crs.crs_str,\n 'count': len(dataset.data_vars),\n 'dtype': str(dtypes.pop())\n })\n profile.update(profile_override)\n \n+ _calculate_blocksize(profile)\n+\n with rasterio.open(str(filename), 'w', **profile) as dest:\n if hasattr(dataset, 'data_vars'):\n for bandnum, data in enumerate(dataset.data_vars.values(), start=1):\n- dest.write(data.isel(time=time_index).data, bandnum)\n- else: # Assume that we have a DataArray\n- dest.write(dataset.isel(time=time_index).data, 0)\n+ dest.write(data.data, bandnum)\n+\n+\n+def _calculate_blocksize(profile):\n+ # Block size must be smaller than the image size, and for geotiffs must be divisible by 16\n+ # Fix for small images.\n+ if profile['blockxsize'] > profile['width']:\n+ if profile['width'] % 16 == 0 or profile['width'] < 16:\n+ profile['blockxsize'] = profile['width']\n+ else:\n+ profile['blockxsize'] = 16\n+\n+ if profile['blockysize'] > profile['height']:\n+ if profile['height'] % 16 == 0 or profile['height'] < 16:\n+ profile['blockysize'] = profile['height']\n+ else:\n+ profile['blockysize'] = 16\n \n \n def ga_pq_fuser(dest, src):\n", "issue": "Multiple issues with write_geotiff() method\n### Expected behaviour\r\nI should be able to write a small GeoTiff file using the `datacube.helpers.write_geotiff()` method.\r\n\r\nI should also not have to specify a `time_index`.\r\n\r\n\r\n### Actual behaviour\r\n\r\n For a 50x50 raster it fails with:\r\n```python\r\n ValueError: blockxsize exceeds raster width\r\n```\r\n\r\nAnd if `time_index` is not specified it fails with:\r\n```pytb\r\nException ignored in: 'pandas._libs.lib.is_bool_array'\r\nTypeError: len() of unsized object\r\nTraceback (most recent call last):\r\n File \"testlanwei.py\", 
line 30, in <module>\r\n write_geotiff(filename='H8_test_1.tif', dataset=data, profile_override={'blockxsize':16, 'blockysize':16})\r\n File \"/home/547/dra547/github/datacube-core/datacube/helpers.py\", line 48, in write_geotiff\r\n dest.write(data.isel(time=time_index).data, bandnum)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/dataarray.py\", line 679, in isel\r\n ds = self._to_temp_dataset().isel(drop=drop, **indexers)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/dataset.py\", line 1143, in isel\r\n new_var = var.isel(**var_indexers)\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/variable.py\", line 570, in isel\r\n return self[tuple(key)]\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/variable.py\", line 1225, in __getitem__\r\n values = self._indexable_data[key]\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/xarray/core/indexing.py\", line 545, in __getitem__\r\n result = self.array[key]\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py\", line 274, in __getitem__\r\n if com.is_bool_indexer(key):\r\n File \"/g/data/v10/public/modules/agdc-py3-env/20170713/envs/agdc/lib/python3.6/site-packages/pandas/core/common.py\", line 189, in is_bool_indexer\r\n raise ValueError('cannot index with vector containing '\r\nValueError: cannot index with vector containing NA / NaN values\r\n```\r\n\r\n\r\nThe function should be changed to take more useful parameters, and should have unit test coverage. 
\r\n\r\n\r\n### Steps to reproduce the behaviour\r\n\r\nRun the following script [broken_geotiff_write.txt](https://github.com/opendatacube/datacube-core/files/1204141/broken_geotiff_write.txt).\r\n\r\n\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n`1.5.1`\r\n* What datacube deployment/enviornment are you running against?\r\n`agdc-py3-prod/1.5.1`\n", "before_files": [{"content": "\"\"\"\nUseful functions for Datacube users\n\nNot used internally, those should go in `utils.py`\n\"\"\"\nfrom __future__ import absolute_import\nimport rasterio\nimport numpy as np\n\nDEFAULT_PROFILE = {\n 'blockxsize': 256,\n 'blockysize': 256,\n 'compress': 'lzw',\n 'driver': 'GTiff',\n 'interleave': 'band',\n 'nodata': 0.0,\n 'photometric': 'RGBA',\n 'tiled': True}\n\n\ndef write_geotiff(filename, dataset, time_index=None, profile_override=None):\n \"\"\"\n Write an xarray dataset to a geotiff\n\n :param filename: Output filename\n :attr dataset: xarray dataset containing multiple bands to write to file\n :attr time_index: time index to write to file\n :attr profile_override: option dict, overrides rasterio file creation options.\n \"\"\"\n profile_override = profile_override or {}\n\n try:\n dtypes = {val.dtype for val in dataset.data_vars.values()}\n assert len(dtypes) == 1 # Check for multiple dtypes\n except AttributeError:\n dtypes = [dataset.dtype]\n\n profile = DEFAULT_PROFILE.copy()\n profile.update({\n 'width': dataset.dims[dataset.crs.dimensions[1]],\n 'height': dataset.dims[dataset.crs.dimensions[0]],\n 'affine': dataset.affine,\n 'crs': dataset.crs.crs_str,\n 'count': len(dataset.data_vars),\n 'dtype': str(dtypes.pop())\n })\n profile.update(profile_override)\n\n with rasterio.open(str(filename), 'w', **profile) as dest:\n if hasattr(dataset, 'data_vars'):\n for bandnum, data in enumerate(dataset.data_vars.values(), start=1):\n dest.write(data.isel(time=time_index).data, bandnum)\n else: # Assume that we have a DataArray\n dest.write(dataset.isel(time=time_index).data, 0)\n\n\ndef ga_pq_fuser(dest, src):\n \"\"\"\n Fuse two Geoscience Australia Pixel Quality ndarrays\n\n To be used as a `fuse_func` when loaded `grouped` data, for example when grouping\n by solar day to avoid duplicate data from scene overlaps.\n \"\"\"\n valid_bit = 8\n valid_val = (1 << valid_bit)\n\n no_data_dest_mask = ~(dest & valid_val).astype(bool)\n np.copyto(dest, src, where=no_data_dest_mask)\n\n both_data_mask = (valid_val & dest & src).astype(bool)\n np.copyto(dest, src & dest, where=both_data_mask)\n", "path": "datacube/helpers.py"}]}
| 2,065 | 809 |
gh_patches_debug_40259
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-3809
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add query to fetch draft orders
In the Dashboard 2.0 we want to have separate links to view draft orders and confirmed ones. For that reason we need to add a separate resolver to return only draft orders. The existing `orders` resolver should be changed to return confirmed orders.
</issue>
<code>
[start of saleor/graphql/order/resolvers.py]
1 import graphene
2 import graphene_django_optimizer as gql_optimizer
3
4 from ...order import OrderEvents, OrderStatus, models
5 from ...order.utils import sum_order_totals
6 from ..utils import filter_by_period, filter_by_query_param
7 from .enums import OrderStatusFilter
8 from .types import Order
9 from .utils import applicable_shipping_methods
10
11 ORDER_SEARCH_FIELDS = (
12 'id', 'discount_name', 'token', 'user_email', 'user__email')
13
14
15 def resolve_orders(info, created, status, query):
16 user = info.context.user
17 if user.has_perm('order.manage_orders'):
18 qs = models.Order.objects.all()
19 else:
20 qs = user.orders.confirmed()
21 qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)
22
23 # filter orders by status
24 if status is not None:
25 if status == OrderStatusFilter.READY_TO_FULFILL:
26 qs = qs.ready_to_fulfill()
27 elif status == OrderStatusFilter.READY_TO_CAPTURE:
28 qs = qs.ready_to_capture()
29
30 # filter orders by creation date
31 if created is not None:
32 qs = filter_by_period(qs, created, 'created')
33
34 return gql_optimizer.query(qs, info)
35
36
37 def resolve_orders_total(info, period):
38 qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)
39 qs = filter_by_period(qs, period, 'created')
40 return sum_order_totals(qs)
41
42
43 def resolve_order(info, id):
44 """Return order only for user assigned to it or proper staff user."""
45 user = info.context.user
46 order = graphene.Node.get_node_from_global_id(info, id, Order)
47 if user.has_perm('order.manage_orders') or order.user == user:
48 return order
49 return None
50
51
52 def resolve_shipping_methods(obj, info, price):
53 return applicable_shipping_methods(obj, info, price)
54
55
56 def resolve_homepage_events(info):
57 # Filter only selected events to be displayed on homepage.
58 types = [
59 OrderEvents.PLACED.value, OrderEvents.PLACED_FROM_DRAFT.value,
60 OrderEvents.ORDER_FULLY_PAID.value]
61 return models.OrderEvent.objects.filter(type__in=types)
62
63
64 def resolve_order_by_token(info, token):
65 return models.Order.objects.filter(token=token).first()
66
[end of saleor/graphql/order/resolvers.py]
[start of saleor/graphql/order/schema.py]
1 from textwrap import dedent
2
3 import graphene
4 from graphql_jwt.decorators import login_required, permission_required
5
6 from ..core.enums import ReportingPeriod
7 from ..core.fields import PrefetchingConnectionField
8 from ..core.types import TaxedMoney
9 from ..descriptions import DESCRIPTIONS
10 from .bulk_mutations.draft_orders import DraftOrderBulkDelete
11 from .enums import OrderStatusFilter
12 from .mutations.draft_orders import (
13 DraftOrderComplete, DraftOrderCreate, DraftOrderDelete,
14 DraftOrderLineDelete, DraftOrderLinesCreate, DraftOrderLineUpdate,
15 DraftOrderUpdate)
16 from .mutations.fulfillments import (
17 FulfillmentCancel, FulfillmentCreate, FulfillmentUpdateTracking)
18 from .mutations.orders import (
19 OrderAddNote, OrderCancel, OrderCapture, OrderMarkAsPaid, OrderRefund,
20 OrderUpdate, OrderUpdateShipping, OrderVoid)
21 from .resolvers import (
22 resolve_homepage_events, resolve_order, resolve_order_by_token,
23 resolve_orders, resolve_orders_total)
24 from .types import Order, OrderEvent
25
26
27 class OrderQueries(graphene.ObjectType):
28 homepage_events = PrefetchingConnectionField(
29 OrderEvent, description=dedent('''List of activity events to display on
30 homepage (at the moment it only contains order-events).'''))
31 order = graphene.Field(
32 Order, description='Lookup an order by ID.',
33 id=graphene.Argument(graphene.ID, required=True))
34 orders = PrefetchingConnectionField(
35 Order,
36 query=graphene.String(description=DESCRIPTIONS['order']),
37 created=graphene.Argument(
38 ReportingPeriod,
39 description='Filter orders from a selected timespan.'),
40 status=graphene.Argument(
41 OrderStatusFilter, description='Filter order by status'),
42 description='List of the shop\'s orders.')
43 orders_total = graphene.Field(
44 TaxedMoney, description='Total sales.',
45 period=graphene.Argument(
46 ReportingPeriod,
47 description='Get total sales for selected span of time.'))
48 order_by_token = graphene.Field(
49 Order, description='Lookup an order by token.',
50 token=graphene.Argument(graphene.String, required=True))
51
52 @permission_required('order.manage_orders')
53 def resolve_homepage_events(self, info, **kwargs):
54 return resolve_homepage_events(info)
55
56 @login_required
57 def resolve_order(self, info, id):
58 return resolve_order(info, id)
59
60 @login_required
61 def resolve_orders(
62 self, info, created=None, status=None, query=None, **kwargs):
63 return resolve_orders(info, created, status, query)
64
65 @permission_required('order.manage_orders')
66 def resolve_orders_total(self, info, period, **kwargs):
67 return resolve_orders_total(info, period)
68
69 def resolve_order_by_token(self, info, token):
70 return resolve_order_by_token(info, token)
71
72
73 class OrderMutations(graphene.ObjectType):
74 draft_order_complete = DraftOrderComplete.Field()
75 draft_order_create = DraftOrderCreate.Field()
76 draft_order_delete = DraftOrderDelete.Field()
77 draft_order_bulk_delete = DraftOrderBulkDelete.Field()
78 draft_order_lines_create = DraftOrderLinesCreate.Field()
79 draft_order_line_delete = DraftOrderLineDelete.Field()
80 draft_order_line_update = DraftOrderLineUpdate.Field()
81 draft_order_update = DraftOrderUpdate.Field()
82
83 order_add_note = OrderAddNote.Field()
84 order_cancel = OrderCancel.Field()
85 order_capture = OrderCapture.Field()
86 order_fulfillment_cancel = FulfillmentCancel.Field()
87 order_fulfillment_create = FulfillmentCreate.Field()
88 order_fulfillment_update_tracking = FulfillmentUpdateTracking.Field()
89 order_mark_as_paid = OrderMarkAsPaid.Field()
90 order_refund = OrderRefund.Field()
91 order_update = OrderUpdate.Field()
92 order_update_shipping = OrderUpdateShipping.Field()
93 order_void = OrderVoid.Field()
94
[end of saleor/graphql/order/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/graphql/order/resolvers.py b/saleor/graphql/order/resolvers.py
--- a/saleor/graphql/order/resolvers.py
+++ b/saleor/graphql/order/resolvers.py
@@ -12,12 +12,7 @@
'id', 'discount_name', 'token', 'user_email', 'user__email')
-def resolve_orders(info, created, status, query):
- user = info.context.user
- if user.has_perm('order.manage_orders'):
- qs = models.Order.objects.all()
- else:
- qs = user.orders.confirmed()
+def filter_orders(qs, info, created, status, query):
qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)
# filter orders by status
@@ -34,6 +29,20 @@
return gql_optimizer.query(qs, info)
+def resolve_orders(info, created, status, query):
+ user = info.context.user
+ if user.has_perm('order.manage_orders'):
+ qs = models.Order.objects.confirmed()
+ else:
+ qs = user.orders.confirmed()
+ return filter_orders(qs, info, created, status, query)
+
+
+def resolve_draft_orders(info, created, query):
+ qs = models.Order.objects.drafts()
+ return filter_orders(qs, info, created, None, query)
+
+
def resolve_orders_total(info, period):
qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)
qs = filter_by_period(qs, period, 'created')
diff --git a/saleor/graphql/order/schema.py b/saleor/graphql/order/schema.py
--- a/saleor/graphql/order/schema.py
+++ b/saleor/graphql/order/schema.py
@@ -19,8 +19,8 @@
OrderAddNote, OrderCancel, OrderCapture, OrderMarkAsPaid, OrderRefund,
OrderUpdate, OrderUpdateShipping, OrderVoid)
from .resolvers import (
- resolve_homepage_events, resolve_order, resolve_order_by_token,
- resolve_orders, resolve_orders_total)
+ resolve_draft_orders, resolve_homepage_events, resolve_order,
+ resolve_order_by_token, resolve_orders, resolve_orders_total)
from .types import Order, OrderEvent
@@ -40,6 +40,13 @@
status=graphene.Argument(
OrderStatusFilter, description='Filter order by status'),
description='List of the shop\'s orders.')
+ draft_orders = PrefetchingConnectionField(
+ Order,
+ query=graphene.String(description=DESCRIPTIONS['order']),
+ created=graphene.Argument(
+ ReportingPeriod,
+ description='Filter draft orders from a selected timespan.'),
+ description='List of the shop\'s draft orders.')
orders_total = graphene.Field(
TaxedMoney, description='Total sales.',
period=graphene.Argument(
@@ -62,6 +69,10 @@
self, info, created=None, status=None, query=None, **kwargs):
return resolve_orders(info, created, status, query)
+ @permission_required('order.manage_orders')
+ def resolve_draft_orders(self, info, created=None, query=None, **kwargs):
+ return resolve_draft_orders(info, created, query)
+
@permission_required('order.manage_orders')
def resolve_orders_total(self, info, period, **kwargs):
return resolve_orders_total(info, period)
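
For illustration, the new field could be queried roughly as sketched below. Hedged notes: graphene exposes `draft_orders` as `draftOrders`, the resolver requires the `order.manage_orders` permission as shown above, and the commented client call is an assumption about a typical test setup rather than part of this diff.

```python
# Hypothetical usage sketch -- the GraphQL client/fixture name is an assumption.
DRAFT_ORDERS_QUERY = """
query {
  draftOrders(first: 5) {
    edges {
      node {
        id
      }
    }
  }
}
"""
# response = staff_api_client.post_graphql(DRAFT_ORDERS_QUERY)
# assert "errors" not in response.json()
```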
|
{"golden_diff": "diff --git a/saleor/graphql/order/resolvers.py b/saleor/graphql/order/resolvers.py\n--- a/saleor/graphql/order/resolvers.py\n+++ b/saleor/graphql/order/resolvers.py\n@@ -12,12 +12,7 @@\n 'id', 'discount_name', 'token', 'user_email', 'user__email')\n \n \n-def resolve_orders(info, created, status, query):\n- user = info.context.user\n- if user.has_perm('order.manage_orders'):\n- qs = models.Order.objects.all()\n- else:\n- qs = user.orders.confirmed()\n+def filter_orders(qs, info, created, status, query):\n qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)\n \n # filter orders by status\n@@ -34,6 +29,20 @@\n return gql_optimizer.query(qs, info)\n \n \n+def resolve_orders(info, created, status, query):\n+ user = info.context.user\n+ if user.has_perm('order.manage_orders'):\n+ qs = models.Order.objects.confirmed()\n+ else:\n+ qs = user.orders.confirmed()\n+ return filter_orders(qs, info, created, status, query)\n+\n+\n+def resolve_draft_orders(info, created, query):\n+ qs = models.Order.objects.drafts()\n+ return filter_orders(qs, info, created, None, query)\n+\n+\n def resolve_orders_total(info, period):\n qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)\n qs = filter_by_period(qs, period, 'created')\ndiff --git a/saleor/graphql/order/schema.py b/saleor/graphql/order/schema.py\n--- a/saleor/graphql/order/schema.py\n+++ b/saleor/graphql/order/schema.py\n@@ -19,8 +19,8 @@\n OrderAddNote, OrderCancel, OrderCapture, OrderMarkAsPaid, OrderRefund,\n OrderUpdate, OrderUpdateShipping, OrderVoid)\n from .resolvers import (\n- resolve_homepage_events, resolve_order, resolve_order_by_token,\n- resolve_orders, resolve_orders_total)\n+ resolve_draft_orders, resolve_homepage_events, resolve_order,\n+ resolve_order_by_token, resolve_orders, resolve_orders_total)\n from .types import Order, OrderEvent\n \n \n@@ -40,6 +40,13 @@\n status=graphene.Argument(\n OrderStatusFilter, description='Filter order by status'),\n description='List of the shop\\'s orders.')\n+ draft_orders = PrefetchingConnectionField(\n+ Order,\n+ query=graphene.String(description=DESCRIPTIONS['order']),\n+ created=graphene.Argument(\n+ ReportingPeriod,\n+ description='Filter draft orders from a selected timespan.'),\n+ description='List of the shop\\'s draft orders.')\n orders_total = graphene.Field(\n TaxedMoney, description='Total sales.',\n period=graphene.Argument(\n@@ -62,6 +69,10 @@\n self, info, created=None, status=None, query=None, **kwargs):\n return resolve_orders(info, created, status, query)\n \n+ @permission_required('order.manage_orders')\n+ def resolve_draft_orders(self, info, created=None, query=None, **kwargs):\n+ return resolve_draft_orders(info, created, query)\n+\n @permission_required('order.manage_orders')\n def resolve_orders_total(self, info, period, **kwargs):\n return resolve_orders_total(info, period)\n", "issue": "Add query to fetch draft orders\nIn the Dashboard 2.0 we want to have separate links to view draft orders and confirmed ones. For that reason we need to add a separate resolver to return only draft orders. 
Exiting `orders` resolver should be changed to return confirmed orders.\n", "before_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...order import OrderEvents, OrderStatus, models\nfrom ...order.utils import sum_order_totals\nfrom ..utils import filter_by_period, filter_by_query_param\nfrom .enums import OrderStatusFilter\nfrom .types import Order\nfrom .utils import applicable_shipping_methods\n\nORDER_SEARCH_FIELDS = (\n 'id', 'discount_name', 'token', 'user_email', 'user__email')\n\n\ndef resolve_orders(info, created, status, query):\n user = info.context.user\n if user.has_perm('order.manage_orders'):\n qs = models.Order.objects.all()\n else:\n qs = user.orders.confirmed()\n qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)\n\n # filter orders by status\n if status is not None:\n if status == OrderStatusFilter.READY_TO_FULFILL:\n qs = qs.ready_to_fulfill()\n elif status == OrderStatusFilter.READY_TO_CAPTURE:\n qs = qs.ready_to_capture()\n\n # filter orders by creation date\n if created is not None:\n qs = filter_by_period(qs, created, 'created')\n\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_orders_total(info, period):\n qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)\n qs = filter_by_period(qs, period, 'created')\n return sum_order_totals(qs)\n\n\ndef resolve_order(info, id):\n \"\"\"Return order only for user assigned to it or proper staff user.\"\"\"\n user = info.context.user\n order = graphene.Node.get_node_from_global_id(info, id, Order)\n if user.has_perm('order.manage_orders') or order.user == user:\n return order\n return None\n\n\ndef resolve_shipping_methods(obj, info, price):\n return applicable_shipping_methods(obj, info, price)\n\n\ndef resolve_homepage_events(info):\n # Filter only selected events to be displayed on homepage.\n types = [\n OrderEvents.PLACED.value, OrderEvents.PLACED_FROM_DRAFT.value,\n OrderEvents.ORDER_FULLY_PAID.value]\n return models.OrderEvent.objects.filter(type__in=types)\n\n\ndef resolve_order_by_token(info, token):\n return models.Order.objects.filter(token=token).first()\n", "path": "saleor/graphql/order/resolvers.py"}, {"content": "from textwrap import dedent\n\nimport graphene\nfrom graphql_jwt.decorators import login_required, permission_required\n\nfrom ..core.enums import ReportingPeriod\nfrom ..core.fields import PrefetchingConnectionField\nfrom ..core.types import TaxedMoney\nfrom ..descriptions import DESCRIPTIONS\nfrom .bulk_mutations.draft_orders import DraftOrderBulkDelete\nfrom .enums import OrderStatusFilter\nfrom .mutations.draft_orders import (\n DraftOrderComplete, DraftOrderCreate, DraftOrderDelete,\n DraftOrderLineDelete, DraftOrderLinesCreate, DraftOrderLineUpdate,\n DraftOrderUpdate)\nfrom .mutations.fulfillments import (\n FulfillmentCancel, FulfillmentCreate, FulfillmentUpdateTracking)\nfrom .mutations.orders import (\n OrderAddNote, OrderCancel, OrderCapture, OrderMarkAsPaid, OrderRefund,\n OrderUpdate, OrderUpdateShipping, OrderVoid)\nfrom .resolvers import (\n resolve_homepage_events, resolve_order, resolve_order_by_token,\n resolve_orders, resolve_orders_total)\nfrom .types import Order, OrderEvent\n\n\nclass OrderQueries(graphene.ObjectType):\n homepage_events = PrefetchingConnectionField(\n OrderEvent, description=dedent('''List of activity events to display on\n homepage (at the moment it only contains order-events).'''))\n order = graphene.Field(\n Order, description='Lookup an order by ID.',\n 
id=graphene.Argument(graphene.ID, required=True))\n orders = PrefetchingConnectionField(\n Order,\n query=graphene.String(description=DESCRIPTIONS['order']),\n created=graphene.Argument(\n ReportingPeriod,\n description='Filter orders from a selected timespan.'),\n status=graphene.Argument(\n OrderStatusFilter, description='Filter order by status'),\n description='List of the shop\\'s orders.')\n orders_total = graphene.Field(\n TaxedMoney, description='Total sales.',\n period=graphene.Argument(\n ReportingPeriod,\n description='Get total sales for selected span of time.'))\n order_by_token = graphene.Field(\n Order, description='Lookup an order by token.',\n token=graphene.Argument(graphene.String, required=True))\n\n @permission_required('order.manage_orders')\n def resolve_homepage_events(self, info, **kwargs):\n return resolve_homepage_events(info)\n\n @login_required\n def resolve_order(self, info, id):\n return resolve_order(info, id)\n\n @login_required\n def resolve_orders(\n self, info, created=None, status=None, query=None, **kwargs):\n return resolve_orders(info, created, status, query)\n\n @permission_required('order.manage_orders')\n def resolve_orders_total(self, info, period, **kwargs):\n return resolve_orders_total(info, period)\n\n def resolve_order_by_token(self, info, token):\n return resolve_order_by_token(info, token)\n\n\nclass OrderMutations(graphene.ObjectType):\n draft_order_complete = DraftOrderComplete.Field()\n draft_order_create = DraftOrderCreate.Field()\n draft_order_delete = DraftOrderDelete.Field()\n draft_order_bulk_delete = DraftOrderBulkDelete.Field()\n draft_order_lines_create = DraftOrderLinesCreate.Field()\n draft_order_line_delete = DraftOrderLineDelete.Field()\n draft_order_line_update = DraftOrderLineUpdate.Field()\n draft_order_update = DraftOrderUpdate.Field()\n\n order_add_note = OrderAddNote.Field()\n order_cancel = OrderCancel.Field()\n order_capture = OrderCapture.Field()\n order_fulfillment_cancel = FulfillmentCancel.Field()\n order_fulfillment_create = FulfillmentCreate.Field()\n order_fulfillment_update_tracking = FulfillmentUpdateTracking.Field()\n order_mark_as_paid = OrderMarkAsPaid.Field()\n order_refund = OrderRefund.Field()\n order_update = OrderUpdate.Field()\n order_update_shipping = OrderUpdateShipping.Field()\n order_void = OrderVoid.Field()\n", "path": "saleor/graphql/order/schema.py"}]}
| 2,228 | 747 |
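Note on the Saleor record above: the golden diff factors the shared queryset filtering into a `filter_orders` helper, adds a permission-guarded `draft_orders` field with its own `resolve_draft_orders`, and switches staff users' `resolve_orders` to `models.Order.objects.confirmed()` so draft and confirmed orders no longer overlap. A simplified, self-contained sketch of that shape (plain Python with stand-in data, not Saleor code):

    def filter_items(items, created=None, status=None, query=None):
        # shared filtering used by every resolver
        if query is not None:
            items = [o for o in items if query in o["email"]]
        if status is not None:
            items = [o for o in items if o["status"] == status]
        if created is not None:
            items = [o for o in items if o["created"] >= created]
        return items

    def resolve_confirmed(items, created=None, status=None, query=None):
        confirmed = [o for o in items if not o["is_draft"]]
        return filter_items(confirmed, created, status, query)

    def resolve_drafts(items, created=None, query=None):
        drafts = [o for o in items if o["is_draft"]]
        # status is pinned to None for drafts, mirroring the patch
        return filter_items(drafts, created, None, query)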
gh_patches_debug_28007
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Dawson function for dispersion
`plasma_dispersion_func` under `mathematics.py` currently uses `erf()` along with some other terms. This can be simplified to the Dawson function, [dawsn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.dawsn.html), and may even offer some minor speedups if scipy implements it in C code.
</issue>
<code>
[start of plasmapy/mathematics/mathematics.py]
1 """Functions related to the plasma dispersion function"""
2
3 import numpy as np
4 from scipy import special
5 from astropy import units as u
6
7
8 def plasma_dispersion_func(zeta):
9 r"""
10 Calculate the plasma dispersion function
11
12 Parameters
13 ----------
14 zeta : complex, int, float, ndarray, or Quantity
15 Argument of plasma dispersion function.
16
17 Returns
18 -------
19 Z : complex, float, or ndarray
20 Value of plasma dispersion function.
21
22 Raises
23 ------
24 TypeError
25 If the argument is invalid.
26 UnitsError
27 If the argument is a Quantity but is not dimensionless
28 ValueError
29 If the argument is not entirely finite
30
31 See also
32 --------
33 plasma_dispersion_func_deriv
34
35 Notes
36 -----
37 The plasma dispersion function is defined as:
38
39 .. math::
40 Z(\zeta) = \pi^{-0.5} \int_{-\infty}^{+\infty} \frac{e^{-x^2}}{x-\zeta} dx
41
42 where the argument is a complex number [fried.conte-1961]_.
43
44 In plasma wave theory, the plasma dispersion function appears
45 frequently when the background medium has a Maxwellian
46 distribution function. The argument of this function then refers
47 to the ratio of a wave's phase velocity to a thermal velocity.
48
49 References
50 ----------
51 .. [fried.conte-1961] Fried, Burton D. and Samuel D. Conte. 1961.
52 The Plasma Dispersion Function: The Hilbert Transformation of the
53 Gaussian. Academic Press (New York and London).
54
55 Examples
56 --------
57 >>> plasma_dispersion_func(0)
58 1.7724538509055159j
59 >>> plasma_dispersion_func(1j)
60 0.7578721561413119j
61 >>> plasma_dispersion_func(-1.52+0.47j)
62 (0.6088888957234255+0.3349458388287403j)
63
64 """
65
66 if not isinstance(zeta, (int, float, complex, np.ndarray, u.Quantity)):
67 raise TypeError("The argument to plasma_dispersion_function "
68 "must be one of the following types: complex, float, "
69 "int, ndarray, or Quantity.")
70
71 if isinstance(zeta, u.Quantity):
72 if zeta.unit == u.dimensionless_unscaled:
73 zeta = zeta.value
74 else:
75 raise u.UnitsError("The argument to plasma_dispersion_function "
76 "must be dimensionless if it is a Quantity")
77
78 if not np.all(np.isfinite(zeta)):
79 raise ValueError("The argument to plasma_dispersion_function is "
80 "not finite.")
81
82 Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))
83
84 return Z
85
86
87 def plasma_dispersion_func_deriv(zeta):
88 r"""Calculate the derivative of the plasma dispersion function
89
90 Parameters
91 ----------
92 zeta : complex, int, float, ndarray, or Quantity
93 Argument of plasma dispersion function.
94
95 Returns
96 -------
97 Zprime : complex, int, float, or ndarray
98 First derivative of plasma dispersion function.
99
100 Raises
101 ------
102 TypeError
103 If the argument is invalid.
104 UnitsError
105 If the argument is a Quantity but is not dimensionless
106 ValueError
107 If the argument is not entirely finite
108
109 See also
110 --------
111 plasma_dispersion_func
112
113 Notes
114 -----
115 The derivative of the plasma dispersion function is defined as:
116
117 .. math::
118 Z'(\zeta) = \pi^{-0.5} \int_{-\infty}^{+\infty} \frac{e^{-x^2}}{(x-\zeta)^2} dx
119
120 where the argument is a complex number [fried.conte-1961]_.
121
122 Examples
123 --------
124 >>> plasma_dispersion_func_deriv(0)
125 (-2+0j)
126 >>> plasma_dispersion_func_deriv(1j)
127 (-0.48425568771737626+0j)
128 >>> plasma_dispersion_func_deriv(-1.52+0.47j)
129 (0.1658713314982294+0.4458797880593507j)
130
131 """
132
133 if not isinstance(zeta, (int, float, complex, np.ndarray, u.Quantity)):
134 raise TypeError("The argument to plasma_dispersion_function_deriv "
135 "must be one of the following types: complex, float, "
136 "int, ndarray, or Quantity.")
137
138 if isinstance(zeta, u.Quantity):
139 if zeta.unit == u.dimensionless_unscaled:
140 zeta = zeta.value
141 else:
142 raise u.UnitsError("The argument to "
143 "plasma_dispersion_function_deriv "
144 "must be dimensionless if it is a Quantity")
145
146 if not np.all(np.isfinite(zeta)):
147 raise ValueError("The argument to plasma_dispersion_function_deriv is "
148 "not finite.")
149
150 Zprime = -2 * (1 + zeta * plasma_dispersion_func(zeta))
151
152 return Zprime
153
[end of plasmapy/mathematics/mathematics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plasmapy/mathematics/mathematics.py b/plasmapy/mathematics/mathematics.py
--- a/plasmapy/mathematics/mathematics.py
+++ b/plasmapy/mathematics/mathematics.py
@@ -3,6 +3,7 @@
import numpy as np
from scipy import special
from astropy import units as u
+from scipy.special import wofz as Faddeeva_function
def plasma_dispersion_func(zeta):
@@ -57,9 +58,9 @@
>>> plasma_dispersion_func(0)
1.7724538509055159j
>>> plasma_dispersion_func(1j)
- 0.7578721561413119j
+ 0.757872156141312j
>>> plasma_dispersion_func(-1.52+0.47j)
- (0.6088888957234255+0.3349458388287403j)
+ (0.6088888957234254+0.33494583882874024j)
"""
@@ -79,7 +80,7 @@
raise ValueError("The argument to plasma_dispersion_function is "
"not finite.")
- Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))
+ Z = 1j * np.sqrt(np.pi) * Faddeeva_function(zeta)
return Z
@@ -124,9 +125,9 @@
>>> plasma_dispersion_func_deriv(0)
(-2+0j)
>>> plasma_dispersion_func_deriv(1j)
- (-0.48425568771737626+0j)
+ (-0.48425568771737604+0j)
>>> plasma_dispersion_func_deriv(-1.52+0.47j)
- (0.1658713314982294+0.4458797880593507j)
+ (0.16587133149822897+0.44587978805935047j)
"""
|
{"golden_diff": "diff --git a/plasmapy/mathematics/mathematics.py b/plasmapy/mathematics/mathematics.py\n--- a/plasmapy/mathematics/mathematics.py\n+++ b/plasmapy/mathematics/mathematics.py\n@@ -3,6 +3,7 @@\n import numpy as np\n from scipy import special\n from astropy import units as u\n+from scipy.special import wofz as Faddeeva_function\n \n \n def plasma_dispersion_func(zeta):\n@@ -57,9 +58,9 @@\n >>> plasma_dispersion_func(0)\n 1.7724538509055159j\n >>> plasma_dispersion_func(1j)\n- 0.7578721561413119j\n+ 0.757872156141312j\n >>> plasma_dispersion_func(-1.52+0.47j)\n- (0.6088888957234255+0.3349458388287403j)\n+ (0.6088888957234254+0.33494583882874024j)\n \n \"\"\"\n \n@@ -79,7 +80,7 @@\n raise ValueError(\"The argument to plasma_dispersion_function is \"\n \"not finite.\")\n \n- Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))\n+ Z = 1j * np.sqrt(np.pi) * Faddeeva_function(zeta)\n \n return Z\n \n@@ -124,9 +125,9 @@\n >>> plasma_dispersion_func_deriv(0)\n (-2+0j)\n >>> plasma_dispersion_func_deriv(1j)\n- (-0.48425568771737626+0j)\n+ (-0.48425568771737604+0j)\n >>> plasma_dispersion_func_deriv(-1.52+0.47j)\n- (0.1658713314982294+0.4458797880593507j)\n+ (0.16587133149822897+0.44587978805935047j)\n \n \"\"\"\n", "issue": "Use Dawson function for dispersion\n`plasma_dispersion_func` under `mathematics.py` currently uses `erf()` along with some other terms. This can be simplified to Dawson function, [dawsn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.dawsn.html), and may even offer some minor speedups if scipy implements it in C code.\n", "before_files": [{"content": "\"\"\"Functions related to the plasma dispersion function\"\"\"\n\nimport numpy as np\nfrom scipy import special\nfrom astropy import units as u\n\n\ndef plasma_dispersion_func(zeta):\n r\"\"\"\n Calculate the plasma dispersion function\n\n Parameters\n ----------\n zeta : complex, int, float, ndarray, or Quantity\n Argument of plasma dispersion function.\n\n Returns\n -------\n Z : complex, float, or ndarray\n Value of plasma dispersion function.\n\n Raises\n ------\n TypeError\n If the argument is invalid.\n UnitsError\n If the argument is a Quantity but is not dimensionless\n ValueError\n If the argument is not entirely finite\n\n See also\n --------\n plasma_dispersion_func_deriv\n\n Notes\n -----\n The plasma dispersion function is defined as:\n\n .. math::\n Z(\\zeta) = \\pi^{-0.5} \\int_{-\\infty}^{+\\infty} \\frac{e^{-x^2}}{x-\\zeta} dx\n\n where the argument is a complex number [fried.conte-1961]_.\n\n In plasma wave theory, the plasma dispersion function appears\n frequently when the background medium has a Maxwellian\n distribution function. The argument of this function then refers\n to the ratio of a wave's phase velocity to a thermal velocity.\n\n References\n ----------\n .. [fried.conte-1961] Fried, Burton D. and Samuel D. Conte. 1961.\n The Plasma Dispersion Function: The Hilbert Transformation of the\n Gaussian. 
Academic Press (New York and London).\n\n Examples\n --------\n >>> plasma_dispersion_func(0)\n 1.7724538509055159j\n >>> plasma_dispersion_func(1j)\n 0.7578721561413119j\n >>> plasma_dispersion_func(-1.52+0.47j)\n (0.6088888957234255+0.3349458388287403j)\n\n \"\"\"\n\n if not isinstance(zeta, (int, float, complex, np.ndarray, u.Quantity)):\n raise TypeError(\"The argument to plasma_dispersion_function \"\n \"must be one of the following types: complex, float, \"\n \"int, ndarray, or Quantity.\")\n\n if isinstance(zeta, u.Quantity):\n if zeta.unit == u.dimensionless_unscaled:\n zeta = zeta.value\n else:\n raise u.UnitsError(\"The argument to plasma_dispersion_function \"\n \"must be dimensionless if it is a Quantity\")\n\n if not np.all(np.isfinite(zeta)):\n raise ValueError(\"The argument to plasma_dispersion_function is \"\n \"not finite.\")\n\n Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))\n\n return Z\n\n\ndef plasma_dispersion_func_deriv(zeta):\n r\"\"\"Calculate the derivative of the plasma dispersion function\n\n Parameters\n ----------\n zeta : complex, int, float, ndarray, or Quantity\n Argument of plasma dispersion function.\n\n Returns\n -------\n Zprime : complex, int, float, or ndarray\n First derivative of plasma dispersion function.\n\n Raises\n ------\n TypeError\n If the argument is invalid.\n UnitsError\n If the argument is a Quantity but is not dimensionless\n ValueError\n If the argument is not entirely finite\n\n See also\n --------\n plasma_dispersion_func\n\n Notes\n -----\n The derivative of the plasma dispersion function is defined as:\n\n .. math::\n Z'(\\zeta) = \\pi^{-0.5} \\int_{-\\infty}^{+\\infty} \\frac{e^{-x^2}}{(x-\\zeta)^2} dx\n\n where the argument is a complex number [fried.conte-1961]_.\n\n Examples\n --------\n >>> plasma_dispersion_func_deriv(0)\n (-2+0j)\n >>> plasma_dispersion_func_deriv(1j)\n (-0.48425568771737626+0j)\n >>> plasma_dispersion_func_deriv(-1.52+0.47j)\n (0.1658713314982294+0.4458797880593507j)\n\n \"\"\"\n\n if not isinstance(zeta, (int, float, complex, np.ndarray, u.Quantity)):\n raise TypeError(\"The argument to plasma_dispersion_function_deriv \"\n \"must be one of the following types: complex, float, \"\n \"int, ndarray, or Quantity.\")\n\n if isinstance(zeta, u.Quantity):\n if zeta.unit == u.dimensionless_unscaled:\n zeta = zeta.value\n else:\n raise u.UnitsError(\"The argument to \"\n \"plasma_dispersion_function_deriv \"\n \"must be dimensionless if it is a Quantity\")\n\n if not np.all(np.isfinite(zeta)):\n raise ValueError(\"The argument to plasma_dispersion_function_deriv is \"\n \"not finite.\")\n\n Zprime = -2 * (1 + zeta * plasma_dispersion_func(zeta))\n\n return Zprime\n", "path": "plasmapy/mathematics/mathematics.py"}]}
| 2,179 | 603 |
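A quick numerical cross-check of the equivalence behind the PlasmaPy patch above (illustrative only, not repository code): the golden diff rewrites Z(zeta) as i*sqrt(pi)*w(zeta) using scipy's Faddeeva function `wofz`, while the issue's suggestion, the Dawson function `dawsn`, gives the same values for real arguments via Z(x) = -2*dawsn(x) + i*sqrt(pi)*exp(-x**2).

    import numpy as np
    from scipy.special import wofz, dawsn

    def Z_faddeeva(zeta):
        # form used in the golden patch
        return 1j * np.sqrt(np.pi) * wofz(zeta)

    def Z_dawson(x):
        # equivalent form for real x, following the issue's suggestion
        return -2.0 * dawsn(x) + 1j * np.sqrt(np.pi) * np.exp(-x * x)

    print(Z_faddeeva(0.0))                              # equals i*sqrt(pi) = 1.7724538509055159j, as in the docstring
    print(np.allclose(Z_faddeeva(1.3), Z_dawson(1.3)))  # True

The tiny last-digit differences in the doctest outputs updated by the patch come from evaluating the same function through `wofz` instead of the erf-based expression.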
gh_patches_debug_678
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-8746
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revert "Create guess_the_number_search.py"
Reverts TheAlgorithms/Python#7937
@ChrisO345 the algorithm you merged failed tests; you shouldn't have merged it
> https://github.com/TheAlgorithms/Python/actions/runs/4997927546/jobs/8952811360
> https://results.pre-commit.ci/run/github/63476337/1684282951.oykZY7Z4R3qR94KO0YZS2Q
</issue>
<code>
[start of other/guess_the_number_search.py]
1 """
2 guess the number using lower,higher and the value to find or guess
3
4 solution works by dividing lower and higher of number guessed
5
6 suppose lower is 0, higher is 1000 and the number to guess is 355
7
8 >>> guess_the_number(10, 1000, 17)
9 started...
10 guess the number : 17
11 details : [505, 257, 133, 71, 40, 25, 17]
12
13 """
14
15
16 def temp_input_value(
17 min_val: int = 10, max_val: int = 1000, option: bool = True
18 ) -> int:
19 """
20 Temporary input values for tests
21
22 >>> temp_input_value(option=True)
23 10
24
25 >>> temp_input_value(option=False)
26 1000
27
28 >>> temp_input_value(min_val=100, option=True)
29 100
30
31 >>> temp_input_value(min_val=100, max_val=50)
32 Traceback (most recent call last):
33 ...
34 ValueError: Invalid value for min_val or max_val (min_value < max_value)
35
36 >>> temp_input_value("ten","fifty",1)
37 Traceback (most recent call last):
38 ...
39 AssertionError: Invalid type of value(s) specified to function!
40
41 >>> temp_input_value(min_val=-100, max_val=500)
42 -100
43
44 >>> temp_input_value(min_val=-5100, max_val=-100)
45 -5100
46 """
47 assert (
48 isinstance(min_val, int)
49 and isinstance(max_val, int)
50 and isinstance(option, bool)
51 ), "Invalid type of value(s) specified to function!"
52
53 if min_val > max_val:
54 raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
55 return min_val if option else max_val
56
57
58 def get_avg(number_1: int, number_2: int) -> int:
59 """
60 Return the mid-number(whole) of two integers a and b
61
62 >>> get_avg(10, 15)
63 12
64
65 >>> get_avg(20, 300)
66 160
67
68 >>> get_avg("abcd", 300)
69 Traceback (most recent call last):
70 ...
71 TypeError: can only concatenate str (not "int") to str
72
73 >>> get_avg(10.5,50.25)
74 30
75 """
76 return int((number_1 + number_2) / 2)
77
78
79 def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
80 """
81 The `guess_the_number` function that guess the number by some operations
82 and using inner functions
83
84 >>> guess_the_number(10, 1000, 17)
85 started...
86 guess the number : 17
87 details : [505, 257, 133, 71, 40, 25, 17]
88
89 >>> guess_the_number(-10000, 10000, 7)
90 started...
91 guess the number : 7
92 details : [0, 5000, 2500, 1250, 625, 312, 156, 78, 39, 19, 9, 4, 6, 7]
93
94 >>> guess_the_number(10, 1000, "a")
95 Traceback (most recent call last):
96 ...
97 AssertionError: argument values must be type of "int"
98
99 >>> guess_the_number(10, 1000, 5)
100 Traceback (most recent call last):
101 ...
102 ValueError: guess value must be within the range of lower and higher value
103
104 >>> guess_the_number(10000, 100, 5)
105 Traceback (most recent call last):
106 ...
107 ValueError: argument value for lower and higher must be(lower > higher)
108 """
109 assert (
110 isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
111 ), 'argument values must be type of "int"'
112
113 if lower > higher:
114 raise ValueError("argument value for lower and higher must be(lower > higher)")
115
116 if not lower < to_guess < higher:
117 raise ValueError(
118 "guess value must be within the range of lower and higher value"
119 )
120
121 def answer(number: int) -> str:
122 """
123 Returns value by comparing with entered `to_guess` number
124 """
125 if number > to_guess:
126 return "high"
127 elif number < to_guess:
128 return "low"
129 else:
130 return "same"
131
132 print("started...")
133
134 last_lowest = lower
135 last_highest = higher
136
137 last_numbers = []
138
139 while True:
140 number = get_avg(last_lowest, last_highest)
141 last_numbers.append(number)
142
143 if answer(number) == "low":
144 last_lowest = number
145 elif answer(number) == "high":
146 last_highest = number
147 else:
148 break
149
150 print(f"guess the number : {last_numbers[-1]}")
151 print(f"details : {str(last_numbers)}")
152
153
154 def main() -> None:
155 """
156 starting point or function of script
157 """
158 lower = int(input("Enter lower value : ").strip())
159 higher = int(input("Enter high value : ").strip())
160 guess = int(input("Enter value to guess : ").strip())
161 guess_the_number(lower, higher, guess)
162
163
164 if __name__ == "__main__":
165 main()
166
[end of other/guess_the_number_search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py
--- a/other/guess_the_number_search.py
+++ b/other/guess_the_number_search.py
@@ -148,7 +148,7 @@
break
print(f"guess the number : {last_numbers[-1]}")
- print(f"details : {str(last_numbers)}")
+ print(f"details : {last_numbers!s}")
def main() -> None:
|
{"golden_diff": "diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py\n--- a/other/guess_the_number_search.py\n+++ b/other/guess_the_number_search.py\n@@ -148,7 +148,7 @@\n break\n \n print(f\"guess the number : {last_numbers[-1]}\")\n- print(f\"details : {str(last_numbers)}\")\n+ print(f\"details : {last_numbers!s}\")\n \n \n def main() -> None:\n", "issue": "Revert \"Create guess_the_number_search.py\"\nReverts TheAlgorithms/Python#7937\r\n@ChrisO345 the algorithm you merged failed tests, you shouldn't have merged it\r\n\r\n> https://github.com/TheAlgorithms/Python/actions/runs/4997927546/jobs/8952811360\r\n> https://results.pre-commit.ci/run/github/63476337/1684282951.oykZY7Z4R3qR94KO0YZS2Q\n", "before_files": [{"content": "\"\"\"\nguess the number using lower,higher and the value to find or guess\n\nsolution works by dividing lower and higher of number guessed\n\nsuppose lower is 0, higher is 1000 and the number to guess is 355\n\n>>> guess_the_number(10, 1000, 17)\nstarted...\nguess the number : 17\ndetails : [505, 257, 133, 71, 40, 25, 17]\n\n\"\"\"\n\n\ndef temp_input_value(\n min_val: int = 10, max_val: int = 1000, option: bool = True\n) -> int:\n \"\"\"\n Temporary input values for tests\n\n >>> temp_input_value(option=True)\n 10\n\n >>> temp_input_value(option=False)\n 1000\n\n >>> temp_input_value(min_val=100, option=True)\n 100\n\n >>> temp_input_value(min_val=100, max_val=50)\n Traceback (most recent call last):\n ...\n ValueError: Invalid value for min_val or max_val (min_value < max_value)\n\n >>> temp_input_value(\"ten\",\"fifty\",1)\n Traceback (most recent call last):\n ...\n AssertionError: Invalid type of value(s) specified to function!\n\n >>> temp_input_value(min_val=-100, max_val=500)\n -100\n\n >>> temp_input_value(min_val=-5100, max_val=-100)\n -5100\n \"\"\"\n assert (\n isinstance(min_val, int)\n and isinstance(max_val, int)\n and isinstance(option, bool)\n ), \"Invalid type of value(s) specified to function!\"\n\n if min_val > max_val:\n raise ValueError(\"Invalid value for min_val or max_val (min_value < max_value)\")\n return min_val if option else max_val\n\n\ndef get_avg(number_1: int, number_2: int) -> int:\n \"\"\"\n Return the mid-number(whole) of two integers a and b\n\n >>> get_avg(10, 15)\n 12\n\n >>> get_avg(20, 300)\n 160\n\n >>> get_avg(\"abcd\", 300)\n Traceback (most recent call last):\n ...\n TypeError: can only concatenate str (not \"int\") to str\n\n >>> get_avg(10.5,50.25)\n 30\n \"\"\"\n return int((number_1 + number_2) / 2)\n\n\ndef guess_the_number(lower: int, higher: int, to_guess: int) -> None:\n \"\"\"\n The `guess_the_number` function that guess the number by some operations\n and using inner functions\n\n >>> guess_the_number(10, 1000, 17)\n started...\n guess the number : 17\n details : [505, 257, 133, 71, 40, 25, 17]\n\n >>> guess_the_number(-10000, 10000, 7)\n started...\n guess the number : 7\n details : [0, 5000, 2500, 1250, 625, 312, 156, 78, 39, 19, 9, 4, 6, 7]\n\n >>> guess_the_number(10, 1000, \"a\")\n Traceback (most recent call last):\n ...\n AssertionError: argument values must be type of \"int\"\n\n >>> guess_the_number(10, 1000, 5)\n Traceback (most recent call last):\n ...\n ValueError: guess value must be within the range of lower and higher value\n\n >>> guess_the_number(10000, 100, 5)\n Traceback (most recent call last):\n ...\n ValueError: argument value for lower and higher must be(lower > higher)\n \"\"\"\n assert (\n isinstance(lower, int) and isinstance(higher, int) and 
isinstance(to_guess, int)\n ), 'argument values must be type of \"int\"'\n\n if lower > higher:\n raise ValueError(\"argument value for lower and higher must be(lower > higher)\")\n\n if not lower < to_guess < higher:\n raise ValueError(\n \"guess value must be within the range of lower and higher value\"\n )\n\n def answer(number: int) -> str:\n \"\"\"\n Returns value by comparing with entered `to_guess` number\n \"\"\"\n if number > to_guess:\n return \"high\"\n elif number < to_guess:\n return \"low\"\n else:\n return \"same\"\n\n print(\"started...\")\n\n last_lowest = lower\n last_highest = higher\n\n last_numbers = []\n\n while True:\n number = get_avg(last_lowest, last_highest)\n last_numbers.append(number)\n\n if answer(number) == \"low\":\n last_lowest = number\n elif answer(number) == \"high\":\n last_highest = number\n else:\n break\n\n print(f\"guess the number : {last_numbers[-1]}\")\n print(f\"details : {str(last_numbers)}\")\n\n\ndef main() -> None:\n \"\"\"\n starting point or function of script\n \"\"\"\n lower = int(input(\"Enter lower value : \").strip())\n higher = int(input(\"Enter high value : \").strip())\n guess = int(input(\"Enter value to guess : \").strip())\n guess_the_number(lower, higher, guess)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "other/guess_the_number_search.py"}]}
| 2,344 | 109 |
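The visible change in the golden patch above is purely stylistic: inside an f-string, the conversion flag `!s` is equivalent to wrapping the value in `str()`, which is presumably what the pre-commit run linked in the issue flags. Illustrative check:

    last_numbers = [505, 257, 133, 71, 40, 25, 17]
    print(f"details : {str(last_numbers)}")   # before the patch
    print(f"details : {last_numbers!s}")      # after the patch
    # both lines print: details : [505, 257, 133, 71, 40, 25, 17]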
gh_patches_debug_4263
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3093
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[BUG]: kernel is not built during runtime for scaled softmax
### 🐛 Describe the bug
This FusedScaleMaskSoftmax module is likely to fail if pre-built OPs are not present. During runtime build, `scaled_masked_softmax` will be None in the `get_batch_per_block` method.
The code can be found here:
https://github.com/hpcaitech/ColossalAI/blob/fff98f06edfb0ec0aba339776db34ba5bb6405f9/colossalai/kernel/cuda_native/scaled_softmax.py#L182
### Environment
_No response_
</issue>
<code>
[start of colossalai/kernel/cuda_native/scaled_softmax.py]
1 # This code from NVIDIA Megatron:
2 # with minor changes.
3
4 import enum
5
6 import torch
7 import torch.nn as nn
8
9 from colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
10 from colossalai.kernel.op_builder.scaled_upper_triangle_masked_softmax import ScaledUpperTrainglemaskedSoftmaxBuilder
11
12 try:
13 from colossalai._C import scaled_masked_softmax, scaled_upper_triang_masked_softmax
14 except ImportError:
15 scaled_masked_softmax = None
16 scaled_upper_triang_masked_softmax = None
17
18
19 class AttnMaskType(enum.Enum):
20 padding = 1
21 causal = 2
22
23
24 class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
25 """
26 Fused operation which performs following three operations in sequence
27
28 1. Scale the tensor.
29 2. Apply upper triangular mask (typically used in gpt models).
30 3. Perform softmax.
31 """
32
33 @staticmethod
34 def forward(ctx, inputs, scale):
35 global scaled_upper_triang_masked_softmax
36 if scaled_upper_triang_masked_softmax:
37 scaled_upper_triang_masked_softmax = ScaledUpperTrainglemaskedSoftmaxBuilder().load()
38
39 scale_t = torch.tensor([scale])
40 softmax_results = scaled_upper_triang_masked_softmax.forward(inputs, scale_t[0])
41
42 ctx.save_for_backward(softmax_results, scale_t)
43 return softmax_results
44
45 @staticmethod
46 def backward(ctx, output_grads):
47 softmax_results, scale_t = ctx.saved_tensors
48 input_grads = scaled_upper_triang_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
49
50 return input_grads, None
51
52
53 class ScaledMaskedSoftmax(torch.autograd.Function):
54 """
55 Fused operation which performs following three operations in sequence
56
57 1. Scale the tensor.
58 2. Apply the mask.
59 3. Perform softmax.
60 """
61
62 @staticmethod
63 def forward(ctx, inputs, mask, scale):
64 scale_t = torch.tensor([scale])
65
66 # build and load kernel if not pre-built
67 global scaled_masked_softmax
68 if scaled_masked_softmax is None:
69 scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()
70
71 softmax_results = scaled_masked_softmax.forward(inputs, mask, scale_t[0])
72 ctx.save_for_backward(softmax_results, scale_t)
73 return softmax_results
74
75 @staticmethod
76 def backward(ctx, output_grads):
77 softmax_results, scale_t = ctx.saved_tensors
78
79 input_grads = scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
80 return input_grads, None, None, None
81
82
83 class FusedScaleMaskSoftmax(nn.Module):
84 """
85 Fused operation: scaling + mask + softmax
86
87 Arguments:
88 input_in_fp16: Flag to indicate if input in fp16 data format.
89 input_in_bf16: Flag to indicate if input in bf16 data format.
90 attn_mask_type: Attention mask type (pad or causal)
91 scaled_masked_softmax_fusion: Flag to indicate user want to use softmax fusion
92 mask_func: Mask function to be applied.
93 softmax_in_fp32: If True, softmax in performed at fp32 precision.
94 scale: Scaling factor used in input tensor scaling.
95 """
96
97 def __init__(
98 self,
99 input_in_fp16,
100 input_in_bf16,
101 attn_mask_type,
102 scaled_masked_softmax_fusion,
103 mask_func,
104 softmax_in_fp32,
105 scale,
106 ):
107 super(FusedScaleMaskSoftmax, self).__init__()
108 self.input_in_fp16 = input_in_fp16
109 self.input_in_bf16 = input_in_bf16
110 assert not (self.input_in_fp16
111 and self.input_in_bf16), "both fp16 and bf16 flags cannot be active at the same time."
112 self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
113 self.attn_mask_type = attn_mask_type
114 self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
115 self.mask_func = mask_func
116 self.softmax_in_fp32 = softmax_in_fp32
117 self.scale = scale
118 assert (self.scale is None or softmax_in_fp32), "softmax should be in fp32 when scaled"
119
120 def forward(self, input, mask):
121 # [b, np, sq, sk]
122 assert input.dim() == 4
123
124 if self.is_kernel_available(mask, *input.size()):
125 return self.forward_fused_softmax(input, mask)
126 else:
127 return self.forward_torch_softmax(input, mask)
128
129 def is_kernel_available(self, mask, b, np, sq, sk):
130 attn_batches = b * np
131
132 if (self.scaled_masked_softmax_fusion # user want to fuse
133 and self.input_in_float16 # input must be fp16
134 and mask is not None # mask tensor must not be None
135 and 16 < sk <= 2048 # sk must be 16 ~ 2048
136 and sq % 4 == 0 # sq must be divisor of 4
137 and attn_batches % 4 == 0 # np * b must be divisor of 4
138 ):
139 if 0 <= sk <= 2048:
140 batch_per_block = self.get_batch_per_block(sq, sk, b, np)
141
142 if self.attn_mask_type == AttnMaskType.causal:
143 if attn_batches % batch_per_block == 0:
144 return True
145 else:
146 if sq % batch_per_block == 0:
147 return True
148 return False
149
150 def forward_fused_softmax(self, input, mask):
151 b, np, sq, sk = input.size()
152 scale = self.scale if self.scale is not None else 1.0
153
154 if self.attn_mask_type == AttnMaskType.causal:
155 assert sq == sk, "causal mask is only for self attention"
156
157 # input is 3D tensor (attn_batches, sq, sk)
158 input = input.view(-1, sq, sk)
159 probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
160 return probs.view(b, np, sq, sk)
161 else:
162 # input is 4D tensor (b, np, sq, sk)
163 return ScaledMaskedSoftmax.apply(input, mask, scale)
164
165 def forward_torch_softmax(self, input, mask):
166 if self.input_in_float16 and self.softmax_in_fp32:
167 input = input.float()
168
169 if self.scale is not None:
170 input = input * self.scale
171 mask_output = self.mask_func(input, mask) if mask is not None else input
172 probs = torch.nn.Softmax(dim=-1)(mask_output)
173
174 if self.input_in_float16 and self.softmax_in_fp32:
175 if self.input_in_fp16:
176 probs = probs.half()
177 else:
178 probs = probs.bfloat16()
179
180 return probs
181
182 def get_batch_per_block(self, sq, sk, b, np):
183 return scaled_masked_softmax.get_batch_per_block(sq, sk, b, np)
184
[end of colossalai/kernel/cuda_native/scaled_softmax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/kernel/cuda_native/scaled_softmax.py b/colossalai/kernel/cuda_native/scaled_softmax.py
--- a/colossalai/kernel/cuda_native/scaled_softmax.py
+++ b/colossalai/kernel/cuda_native/scaled_softmax.py
@@ -180,4 +180,9 @@
return probs
def get_batch_per_block(self, sq, sk, b, np):
+ # build and load kernel if not pre-built
+ global scaled_masked_softmax
+ if scaled_masked_softmax is None:
+ scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()
+
return scaled_masked_softmax.get_batch_per_block(sq, sk, b, np)
|
{"golden_diff": "diff --git a/colossalai/kernel/cuda_native/scaled_softmax.py b/colossalai/kernel/cuda_native/scaled_softmax.py\n--- a/colossalai/kernel/cuda_native/scaled_softmax.py\n+++ b/colossalai/kernel/cuda_native/scaled_softmax.py\n@@ -180,4 +180,9 @@\n return probs\n \n def get_batch_per_block(self, sq, sk, b, np):\n+ # build and load kernel if not pre-built\n+ global scaled_masked_softmax\n+ if scaled_masked_softmax is None:\n+ scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()\n+\n return scaled_masked_softmax.get_batch_per_block(sq, sk, b, np)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: kernel is not built during runtime for scaled softmax\n### \ud83d\udc1b Describe the bug\n\nThis FusedScaleMaskSoftmax module is likely to fail if pre-built OPs are not present. During runtime build, `scaled_masked_softmax` will be None in the `get_batch_per_block` method.\r\n\r\n\r\nThe code can be found.\r\nhttps://github.com/hpcaitech/ColossalAI/blob/fff98f06edfb0ec0aba339776db34ba5bb6405f9/colossalai/kernel/cuda_native/scaled_softmax.py#L182\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "# This code from NVIDIA Megatron:\n# with minor changes.\n\nimport enum\n\nimport torch\nimport torch.nn as nn\n\nfrom colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder\nfrom colossalai.kernel.op_builder.scaled_upper_triangle_masked_softmax import ScaledUpperTrainglemaskedSoftmaxBuilder\n\ntry:\n from colossalai._C import scaled_masked_softmax, scaled_upper_triang_masked_softmax\nexcept ImportError:\n scaled_masked_softmax = None\n scaled_upper_triang_masked_softmax = None\n\n\nclass AttnMaskType(enum.Enum):\n padding = 1\n causal = 2\n\n\nclass ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):\n \"\"\"\n Fused operation which performs following three operations in sequence\n\n 1. Scale the tensor.\n 2. Apply upper triangular mask (typically used in gpt models).\n 3. Perform softmax.\n \"\"\"\n\n @staticmethod\n def forward(ctx, inputs, scale):\n global scaled_upper_triang_masked_softmax\n if scaled_upper_triang_masked_softmax:\n scaled_upper_triang_masked_softmax = ScaledUpperTrainglemaskedSoftmaxBuilder().load()\n\n scale_t = torch.tensor([scale])\n softmax_results = scaled_upper_triang_masked_softmax.forward(inputs, scale_t[0])\n\n ctx.save_for_backward(softmax_results, scale_t)\n return softmax_results\n\n @staticmethod\n def backward(ctx, output_grads):\n softmax_results, scale_t = ctx.saved_tensors\n input_grads = scaled_upper_triang_masked_softmax.backward(output_grads, softmax_results, scale_t[0])\n\n return input_grads, None\n\n\nclass ScaledMaskedSoftmax(torch.autograd.Function):\n \"\"\"\n Fused operation which performs following three operations in sequence\n\n 1. Scale the tensor.\n 2. Apply the mask.\n 3. 
Perform softmax.\n \"\"\"\n\n @staticmethod\n def forward(ctx, inputs, mask, scale):\n scale_t = torch.tensor([scale])\n\n # build and load kernel if not pre-built\n global scaled_masked_softmax\n if scaled_masked_softmax is None:\n scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()\n\n softmax_results = scaled_masked_softmax.forward(inputs, mask, scale_t[0])\n ctx.save_for_backward(softmax_results, scale_t)\n return softmax_results\n\n @staticmethod\n def backward(ctx, output_grads):\n softmax_results, scale_t = ctx.saved_tensors\n\n input_grads = scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])\n return input_grads, None, None, None\n\n\nclass FusedScaleMaskSoftmax(nn.Module):\n \"\"\"\n Fused operation: scaling + mask + softmax\n\n Arguments:\n input_in_fp16: Flag to indicate if input in fp16 data format.\n input_in_bf16: Flag to indicate if input in bf16 data format.\n attn_mask_type: Attention mask type (pad or causal)\n scaled_masked_softmax_fusion: Flag to indicate user want to use softmax fusion\n mask_func: Mask function to be applied.\n softmax_in_fp32: If True, softmax in performed at fp32 precision.\n scale: Scaling factor used in input tensor scaling.\n \"\"\"\n\n def __init__(\n self,\n input_in_fp16,\n input_in_bf16,\n attn_mask_type,\n scaled_masked_softmax_fusion,\n mask_func,\n softmax_in_fp32,\n scale,\n ):\n super(FusedScaleMaskSoftmax, self).__init__()\n self.input_in_fp16 = input_in_fp16\n self.input_in_bf16 = input_in_bf16\n assert not (self.input_in_fp16\n and self.input_in_bf16), \"both fp16 and bf16 flags cannot be active at the same time.\"\n self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16\n self.attn_mask_type = attn_mask_type\n self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion\n self.mask_func = mask_func\n self.softmax_in_fp32 = softmax_in_fp32\n self.scale = scale\n assert (self.scale is None or softmax_in_fp32), \"softmax should be in fp32 when scaled\"\n\n def forward(self, input, mask):\n # [b, np, sq, sk]\n assert input.dim() == 4\n\n if self.is_kernel_available(mask, *input.size()):\n return self.forward_fused_softmax(input, mask)\n else:\n return self.forward_torch_softmax(input, mask)\n\n def is_kernel_available(self, mask, b, np, sq, sk):\n attn_batches = b * np\n\n if (self.scaled_masked_softmax_fusion # user want to fuse\n and self.input_in_float16 # input must be fp16\n and mask is not None # mask tensor must not be None\n and 16 < sk <= 2048 # sk must be 16 ~ 2048\n and sq % 4 == 0 # sq must be divisor of 4\n and attn_batches % 4 == 0 # np * b must be divisor of 4\n ):\n if 0 <= sk <= 2048:\n batch_per_block = self.get_batch_per_block(sq, sk, b, np)\n\n if self.attn_mask_type == AttnMaskType.causal:\n if attn_batches % batch_per_block == 0:\n return True\n else:\n if sq % batch_per_block == 0:\n return True\n return False\n\n def forward_fused_softmax(self, input, mask):\n b, np, sq, sk = input.size()\n scale = self.scale if self.scale is not None else 1.0\n\n if self.attn_mask_type == AttnMaskType.causal:\n assert sq == sk, \"causal mask is only for self attention\"\n\n # input is 3D tensor (attn_batches, sq, sk)\n input = input.view(-1, sq, sk)\n probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)\n return probs.view(b, np, sq, sk)\n else:\n # input is 4D tensor (b, np, sq, sk)\n return ScaledMaskedSoftmax.apply(input, mask, scale)\n\n def forward_torch_softmax(self, input, mask):\n if self.input_in_float16 and self.softmax_in_fp32:\n input = input.float()\n\n if self.scale 
is not None:\n input = input * self.scale\n mask_output = self.mask_func(input, mask) if mask is not None else input\n probs = torch.nn.Softmax(dim=-1)(mask_output)\n\n if self.input_in_float16 and self.softmax_in_fp32:\n if self.input_in_fp16:\n probs = probs.half()\n else:\n probs = probs.bfloat16()\n\n return probs\n\n def get_batch_per_block(self, sq, sk, b, np):\n return scaled_masked_softmax.get_batch_per_block(sq, sk, b, np)\n", "path": "colossalai/kernel/cuda_native/scaled_softmax.py"}]}
| 2,765 | 160 |
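The ColossalAI fix above applies the same lazy-build guard that `ScaledMaskedSoftmax.forward` already uses: when the pre-built extension failed to import (so the module-level name is `None`), build the op on first use and cache it in the global. A minimal, runnable sketch of the pattern with stand-in names (not the actual ColossalAI builder API):

    class _FakeKernel:
        @staticmethod
        def get_batch_per_block(sq, sk, b, np_):
            return 4  # placeholder value for illustration

    _softmax_kernel = None  # would be set by a successful pre-built import

    def _build_extension():
        # stand-in for ScaledMaskedSoftmaxBuilder().load() in the real code
        return _FakeKernel()

    def _load_softmax_kernel():
        global _softmax_kernel
        if _softmax_kernel is None:
            _softmax_kernel = _build_extension()
        return _softmax_kernel

    def get_batch_per_block(sq, sk, b, np_):
        return _load_softmax_kernel().get_batch_per_block(sq, sk, b, np_)

Without the guard, calling `get_batch_per_block` before any forward pass dereferences `None`, which is the failure reported in the issue.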
gh_patches_debug_27579
|
rasdani/github-patches
|
git_diff
|
fonttools__fonttools-2825
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ttGlyphSet] glyphSet + offset not passed correctly to glyf glyph
Reported here: https://github.com/adobe-type-tools/afdko/issues/1560
</issue>
<code>
[start of Lib/fontTools/ttLib/ttGlyphSet.py]
1 """GlyphSets returned by a TTFont."""
2
3 from abc import ABC, abstractmethod
4 from collections.abc import Mapping
5 from copy import copy
6 from fontTools.misc.fixedTools import otRound
7 from fontTools.misc.loggingTools import deprecateFunction
8
9
10 class _TTGlyphSet(Mapping):
11
12 """Generic dict-like GlyphSet class that pulls metrics from hmtx and
13 glyph shape from TrueType or CFF.
14 """
15
16 def __init__(self, font, location, glyphsMapping):
17 self.font = font
18 self.location = location
19 self.glyphsMapping = glyphsMapping
20 self.hMetrics = font["hmtx"].metrics
21 self.vMetrics = getattr(font.get("vmtx"), "metrics", None)
22 if location:
23 from fontTools.varLib.varStore import VarStoreInstancer
24
25 self.hvarTable = getattr(font.get("HVAR"), "table", None)
26 if self.hvarTable is not None:
27 self.hvarInstancer = VarStoreInstancer(
28 self.hvarTable.VarStore, font["fvar"].axes, location
29 )
30 # TODO VVAR, VORG
31
32 def __contains__(self, glyphName):
33 return glyphName in self.glyphsMapping
34
35 def __iter__(self):
36 return iter(self.glyphsMapping.keys())
37
38 def __len__(self):
39 return len(self.glyphsMapping)
40
41 @deprecateFunction(
42 "use 'glyphName in glyphSet' instead", category=DeprecationWarning
43 )
44 def has_key(self, glyphName):
45 return glyphName in self.glyphsMapping
46
47
48 class _TTGlyphSetGlyf(_TTGlyphSet):
49 def __init__(self, font, location):
50 self.glyfTable = font["glyf"]
51 super().__init__(font, location, self.glyfTable)
52 if location:
53 self.gvarTable = font.get("gvar")
54
55 def __getitem__(self, glyphName):
56 return _TTGlyphGlyf(self, glyphName)
57
58
59 class _TTGlyphSetCFF(_TTGlyphSet):
60 def __init__(self, font, location):
61 tableTag = "CFF2" if "CFF2" in font else "CFF "
62 self.charStrings = list(font[tableTag].cff.values())[0].CharStrings
63 super().__init__(font, location, self.charStrings)
64 self.blender = None
65 if location:
66 from fontTools.varLib.varStore import VarStoreInstancer
67
68 varStore = getattr(self.charStrings, "varStore", None)
69 if varStore is not None:
70 instancer = VarStoreInstancer(
71 varStore.otVarStore, font["fvar"].axes, location
72 )
73 self.blender = instancer.interpolateFromDeltas
74
75 def __getitem__(self, glyphName):
76 return _TTGlyphCFF(self, glyphName)
77
78
79 class _TTGlyph(ABC):
80
81 """Glyph object that supports the Pen protocol, meaning that it has
82 .draw() and .drawPoints() methods that take a pen object as their only
83 argument. Additionally there are 'width' and 'lsb' attributes, read from
84 the 'hmtx' table.
85
86 If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
87 attributes.
88 """
89
90 def __init__(self, glyphSet, glyphName):
91 self.glyphSet = glyphSet
92 self.name = glyphName
93 self.width, self.lsb = glyphSet.hMetrics[glyphName]
94 if glyphSet.vMetrics is not None:
95 self.height, self.tsb = glyphSet.vMetrics[glyphName]
96 else:
97 self.height, self.tsb = None, None
98 if glyphSet.location and glyphSet.hvarTable is not None:
99 varidx = (
100 glyphSet.font.getGlyphID(glyphName)
101 if glyphSet.hvarTable.AdvWidthMap is None
102 else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName]
103 )
104 self.width += glyphSet.hvarInstancer[varidx]
105 # TODO: VVAR/VORG
106
107 @abstractmethod
108 def draw(self, pen):
109 """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
110 how that works.
111 """
112 raise NotImplementedError
113
114 def drawPoints(self, pen):
115 """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
116 how that works.
117 """
118 from fontTools.pens.pointPen import SegmentToPointPen
119
120 self.draw(SegmentToPointPen(pen))
121
122
123 class _TTGlyphGlyf(_TTGlyph):
124 def draw(self, pen):
125 """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
126 how that works.
127 """
128 glyph, offset = self._getGlyphAndOffset()
129 glyph.draw(pen, offset)
130
131 def drawPoints(self, pen):
132 """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
133 how that works.
134 """
135 glyph, offset = self._getGlyphAndOffset()
136 glyph.drawPoints(pen, offset)
137
138 def _getGlyphAndOffset(self):
139 if self.glyphSet.location and self.glyphSet.gvarTable is not None:
140 glyph = self._getGlyphInstance()
141 else:
142 glyph = self.glyphSet.glyfTable[self.name]
143
144 offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
145 return glyph, offset
146
147 def _getGlyphInstance(self):
148 from fontTools.varLib.iup import iup_delta
149 from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
150 from fontTools.varLib.models import supportScalar
151
152 glyphSet = self.glyphSet
153 glyfTable = glyphSet.glyfTable
154 variations = glyphSet.gvarTable.variations[self.name]
155 hMetrics = glyphSet.hMetrics
156 vMetrics = glyphSet.vMetrics
157 coordinates, _ = glyfTable._getCoordinatesAndControls(
158 self.name, hMetrics, vMetrics
159 )
160 origCoords, endPts = None, None
161 for var in variations:
162 scalar = supportScalar(glyphSet.location, var.axes)
163 if not scalar:
164 continue
165 delta = var.coordinates
166 if None in delta:
167 if origCoords is None:
168 origCoords, control = glyfTable._getCoordinatesAndControls(
169 self.name, hMetrics, vMetrics
170 )
171 endPts = (
172 control[1] if control[0] >= 1 else list(range(len(control[1])))
173 )
174 delta = iup_delta(delta, origCoords, endPts)
175 coordinates += GlyphCoordinates(delta) * scalar
176
177 glyph = copy(glyfTable[self.name]) # Shallow copy
178 width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)
179 if glyphSet.hvarTable is None:
180 # no HVAR: let's set metrics from the phantom points
181 self.width = width
182 self.lsb = lsb
183 self.height = height
184 self.tsb = tsb
185 return glyph
186
187
188 class _TTGlyphCFF(_TTGlyph):
189 def draw(self, pen):
190 """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
191 how that works.
192 """
193 self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender)
194
195
196 def _setCoordinates(glyph, coord, glyfTable):
197 # Handle phantom points for (left, right, top, bottom) positions.
198 assert len(coord) >= 4
199 leftSideX = coord[-4][0]
200 rightSideX = coord[-3][0]
201 topSideY = coord[-2][1]
202 bottomSideY = coord[-1][1]
203
204 for _ in range(4):
205 del coord[-1]
206
207 if glyph.isComposite():
208 assert len(coord) == len(glyph.components)
209 glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
210 for p, comp in zip(coord, glyph.components):
211 if hasattr(comp, "x"):
212 comp.x, comp.y = p
213 elif glyph.numberOfContours == 0:
214 assert len(coord) == 0
215 else:
216 assert len(coord) == len(glyph.coordinates)
217 glyph.coordinates = coord
218
219 glyph.recalcBounds(glyfTable)
220
221 horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
222 verticalAdvanceWidth = otRound(topSideY - bottomSideY)
223 leftSideBearing = otRound(glyph.xMin - leftSideX)
224 topSideBearing = otRound(topSideY - glyph.yMax)
225 return (
226 horizontalAdvanceWidth,
227 leftSideBearing,
228 verticalAdvanceWidth,
229 topSideBearing,
230 )
231
[end of Lib/fontTools/ttLib/ttGlyphSet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py
--- a/Lib/fontTools/ttLib/ttGlyphSet.py
+++ b/Lib/fontTools/ttLib/ttGlyphSet.py
@@ -126,14 +126,14 @@
how that works.
"""
glyph, offset = self._getGlyphAndOffset()
- glyph.draw(pen, offset)
+ glyph.draw(pen, self.glyphSet.glyfTable, offset)
def drawPoints(self, pen):
"""Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
how that works.
"""
glyph, offset = self._getGlyphAndOffset()
- glyph.drawPoints(pen, offset)
+ glyph.drawPoints(pen, self.glyphSet.glyfTable, offset)
def _getGlyphAndOffset(self):
if self.glyphSet.location and self.glyphSet.gvarTable is not None:
@@ -176,12 +176,12 @@
glyph = copy(glyfTable[self.name]) # Shallow copy
width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)
+ self.lsb = lsb
+ self.tsb = tsb
if glyphSet.hvarTable is None:
# no HVAR: let's set metrics from the phantom points
self.width = width
- self.lsb = lsb
self.height = height
- self.tsb = tsb
return glyph
|
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/ttGlyphSet.py b/Lib/fontTools/ttLib/ttGlyphSet.py\n--- a/Lib/fontTools/ttLib/ttGlyphSet.py\n+++ b/Lib/fontTools/ttLib/ttGlyphSet.py\n@@ -126,14 +126,14 @@\n how that works.\n \"\"\"\n glyph, offset = self._getGlyphAndOffset()\n- glyph.draw(pen, offset)\n+ glyph.draw(pen, self.glyphSet.glyfTable, offset)\n \n def drawPoints(self, pen):\n \"\"\"Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details\n how that works.\n \"\"\"\n glyph, offset = self._getGlyphAndOffset()\n- glyph.drawPoints(pen, offset)\n+ glyph.drawPoints(pen, self.glyphSet.glyfTable, offset)\n \n def _getGlyphAndOffset(self):\n if self.glyphSet.location and self.glyphSet.gvarTable is not None:\n@@ -176,12 +176,12 @@\n \n glyph = copy(glyfTable[self.name]) # Shallow copy\n width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)\n+ self.lsb = lsb\n+ self.tsb = tsb\n if glyphSet.hvarTable is None:\n # no HVAR: let's set metrics from the phantom points\n self.width = width\n- self.lsb = lsb\n self.height = height\n- self.tsb = tsb\n return glyph\n", "issue": "[ttGlyphSet] glyphSet + offset not passed correctly to glyf glyph\nReported here: https://github.com/adobe-type-tools/afdko/issues/1560\n[ttGlyphSet] glyphSet + offset not passed correctly to glyf glyph\nReported here: https://github.com/adobe-type-tools/afdko/issues/1560\n", "before_files": [{"content": "\"\"\"GlyphSets returned by a TTFont.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Mapping\nfrom copy import copy\nfrom fontTools.misc.fixedTools import otRound\nfrom fontTools.misc.loggingTools import deprecateFunction\n\n\nclass _TTGlyphSet(Mapping):\n\n \"\"\"Generic dict-like GlyphSet class that pulls metrics from hmtx and\n glyph shape from TrueType or CFF.\n \"\"\"\n\n def __init__(self, font, location, glyphsMapping):\n self.font = font\n self.location = location\n self.glyphsMapping = glyphsMapping\n self.hMetrics = font[\"hmtx\"].metrics\n self.vMetrics = getattr(font.get(\"vmtx\"), \"metrics\", None)\n if location:\n from fontTools.varLib.varStore import VarStoreInstancer\n\n self.hvarTable = getattr(font.get(\"HVAR\"), \"table\", None)\n if self.hvarTable is not None:\n self.hvarInstancer = VarStoreInstancer(\n self.hvarTable.VarStore, font[\"fvar\"].axes, location\n )\n # TODO VVAR, VORG\n\n def __contains__(self, glyphName):\n return glyphName in self.glyphsMapping\n\n def __iter__(self):\n return iter(self.glyphsMapping.keys())\n\n def __len__(self):\n return len(self.glyphsMapping)\n\n @deprecateFunction(\n \"use 'glyphName in glyphSet' instead\", category=DeprecationWarning\n )\n def has_key(self, glyphName):\n return glyphName in self.glyphsMapping\n\n\nclass _TTGlyphSetGlyf(_TTGlyphSet):\n def __init__(self, font, location):\n self.glyfTable = font[\"glyf\"]\n super().__init__(font, location, self.glyfTable)\n if location:\n self.gvarTable = font.get(\"gvar\")\n\n def __getitem__(self, glyphName):\n return _TTGlyphGlyf(self, glyphName)\n\n\nclass _TTGlyphSetCFF(_TTGlyphSet):\n def __init__(self, font, location):\n tableTag = \"CFF2\" if \"CFF2\" in font else \"CFF \"\n self.charStrings = list(font[tableTag].cff.values())[0].CharStrings\n super().__init__(font, location, self.charStrings)\n self.blender = None\n if location:\n from fontTools.varLib.varStore import VarStoreInstancer\n\n varStore = getattr(self.charStrings, \"varStore\", None)\n if varStore is not None:\n instancer = VarStoreInstancer(\n varStore.otVarStore, font[\"fvar\"].axes, 
location\n )\n self.blender = instancer.interpolateFromDeltas\n\n def __getitem__(self, glyphName):\n return _TTGlyphCFF(self, glyphName)\n\n\nclass _TTGlyph(ABC):\n\n \"\"\"Glyph object that supports the Pen protocol, meaning that it has\n .draw() and .drawPoints() methods that take a pen object as their only\n argument. Additionally there are 'width' and 'lsb' attributes, read from\n the 'hmtx' table.\n\n If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'\n attributes.\n \"\"\"\n\n def __init__(self, glyphSet, glyphName):\n self.glyphSet = glyphSet\n self.name = glyphName\n self.width, self.lsb = glyphSet.hMetrics[glyphName]\n if glyphSet.vMetrics is not None:\n self.height, self.tsb = glyphSet.vMetrics[glyphName]\n else:\n self.height, self.tsb = None, None\n if glyphSet.location and glyphSet.hvarTable is not None:\n varidx = (\n glyphSet.font.getGlyphID(glyphName)\n if glyphSet.hvarTable.AdvWidthMap is None\n else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName]\n )\n self.width += glyphSet.hvarInstancer[varidx]\n # TODO: VVAR/VORG\n\n @abstractmethod\n def draw(self, pen):\n \"\"\"Draw the glyph onto ``pen``. See fontTools.pens.basePen for details\n how that works.\n \"\"\"\n raise NotImplementedError\n\n def drawPoints(self, pen):\n \"\"\"Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details\n how that works.\n \"\"\"\n from fontTools.pens.pointPen import SegmentToPointPen\n\n self.draw(SegmentToPointPen(pen))\n\n\nclass _TTGlyphGlyf(_TTGlyph):\n def draw(self, pen):\n \"\"\"Draw the glyph onto ``pen``. See fontTools.pens.basePen for details\n how that works.\n \"\"\"\n glyph, offset = self._getGlyphAndOffset()\n glyph.draw(pen, offset)\n\n def drawPoints(self, pen):\n \"\"\"Draw the glyph onto ``pen``. 
See fontTools.pens.pointPen for details\n how that works.\n \"\"\"\n glyph, offset = self._getGlyphAndOffset()\n glyph.drawPoints(pen, offset)\n\n def _getGlyphAndOffset(self):\n if self.glyphSet.location and self.glyphSet.gvarTable is not None:\n glyph = self._getGlyphInstance()\n else:\n glyph = self.glyphSet.glyfTable[self.name]\n\n offset = self.lsb - glyph.xMin if hasattr(glyph, \"xMin\") else 0\n return glyph, offset\n\n def _getGlyphInstance(self):\n from fontTools.varLib.iup import iup_delta\n from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates\n from fontTools.varLib.models import supportScalar\n\n glyphSet = self.glyphSet\n glyfTable = glyphSet.glyfTable\n variations = glyphSet.gvarTable.variations[self.name]\n hMetrics = glyphSet.hMetrics\n vMetrics = glyphSet.vMetrics\n coordinates, _ = glyfTable._getCoordinatesAndControls(\n self.name, hMetrics, vMetrics\n )\n origCoords, endPts = None, None\n for var in variations:\n scalar = supportScalar(glyphSet.location, var.axes)\n if not scalar:\n continue\n delta = var.coordinates\n if None in delta:\n if origCoords is None:\n origCoords, control = glyfTable._getCoordinatesAndControls(\n self.name, hMetrics, vMetrics\n )\n endPts = (\n control[1] if control[0] >= 1 else list(range(len(control[1])))\n )\n delta = iup_delta(delta, origCoords, endPts)\n coordinates += GlyphCoordinates(delta) * scalar\n\n glyph = copy(glyfTable[self.name]) # Shallow copy\n width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)\n if glyphSet.hvarTable is None:\n # no HVAR: let's set metrics from the phantom points\n self.width = width\n self.lsb = lsb\n self.height = height\n self.tsb = tsb\n return glyph\n\n\nclass _TTGlyphCFF(_TTGlyph):\n def draw(self, pen):\n \"\"\"Draw the glyph onto ``pen``. See fontTools.pens.basePen for details\n how that works.\n \"\"\"\n self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender)\n\n\ndef _setCoordinates(glyph, coord, glyfTable):\n # Handle phantom points for (left, right, top, bottom) positions.\n assert len(coord) >= 4\n leftSideX = coord[-4][0]\n rightSideX = coord[-3][0]\n topSideY = coord[-2][1]\n bottomSideY = coord[-1][1]\n\n for _ in range(4):\n del coord[-1]\n\n if glyph.isComposite():\n assert len(coord) == len(glyph.components)\n glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy\n for p, comp in zip(coord, glyph.components):\n if hasattr(comp, \"x\"):\n comp.x, comp.y = p\n elif glyph.numberOfContours == 0:\n assert len(coord) == 0\n else:\n assert len(coord) == len(glyph.coordinates)\n glyph.coordinates = coord\n\n glyph.recalcBounds(glyfTable)\n\n horizontalAdvanceWidth = otRound(rightSideX - leftSideX)\n verticalAdvanceWidth = otRound(topSideY - bottomSideY)\n leftSideBearing = otRound(glyph.xMin - leftSideX)\n topSideBearing = otRound(topSideY - glyph.yMax)\n return (\n horizontalAdvanceWidth,\n leftSideBearing,\n verticalAdvanceWidth,\n topSideBearing,\n )\n", "path": "Lib/fontTools/ttLib/ttGlyphSet.py"}]}
| 3,165 | 362 |
| gh_patches_debug_21815 | rasdani/github-patches | git_diff | aio-libs__aiohttp-3189 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build/install broken without cython
## Long story short
Latest master cannot be installed without cython.
```
clang: error: no such file or directory: 'aiohttp/_websocket.c'
```
## Expected behaviour
```
pip install git+http://github.com/aio-libs/aiohttp
```
Should install aiohttp even without cython. It worked at least in previous versions.
## Actual behaviour
Build failed:
```
running build_ext
building 'aiohttp._websocket' extension
creating build/temp.macosx-10.13-x86_64-3.7
creating build/temp.macosx-10.13-x86_64-3.7/aiohttp
gcc -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -I/Users/tosha/.pythonz/pythons/CPython-3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m -c aiohttp/_websocket.c -o build/temp.macosx-10.13-x86_64-3.7/aiohttp/_websocket.o
clang: error: no such file or directory: 'aiohttp/_websocket.c'
clang: error: no input files
error: command 'gcc' failed with exit status 1
```
## Steps to reproduce
Run
```
pip install git+http://github.com/aio-libs/aiohttp
```
in fresh empty venv.
## Your environment
macOS 10.13.6 / Python 3.7 / empty venv
I am sure that the problem is OS/Python version independent.
</issue>
<code>
[start of setup.py]
1 import codecs
2 import pathlib
3 import re
4 import sys
5 from distutils.command.build_ext import build_ext
6 from distutils.errors import (CCompilerError, DistutilsExecError,
7 DistutilsPlatformError)
8
9 from setuptools import Extension, setup
10
11
12 if sys.version_info < (3, 5, 3):
13 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
14
15
16 try:
17 from Cython.Build import cythonize
18 USE_CYTHON = True
19 except ImportError:
20 USE_CYTHON = False
21
22 ext = '.pyx' if USE_CYTHON else '.c'
23
24
25 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),
26 Extension('aiohttp._http_parser',
27 ['aiohttp/_http_parser' + ext,
28 'vendor/http-parser/http_parser.c',
29 'aiohttp/_find_header.c'],
30 define_macros=[('HTTP_PARSER_STRICT', 0)],
31 ),
32 Extension('aiohttp._frozenlist',
33 ['aiohttp/_frozenlist' + ext]),
34 Extension('aiohttp._helpers',
35 ['aiohttp/_helpers' + ext]),
36 Extension('aiohttp._http_writer',
37 ['aiohttp/_http_writer' + ext])]
38
39
40 if USE_CYTHON:
41 extensions = cythonize(extensions)
42
43
44 class BuildFailed(Exception):
45 pass
46
47
48 class ve_build_ext(build_ext):
49 # This class allows C extension building to fail.
50
51 def run(self):
52 try:
53 build_ext.run(self)
54 except (DistutilsPlatformError, FileNotFoundError):
55 raise BuildFailed()
56
57 def build_extension(self, ext):
58 try:
59 build_ext.build_extension(self, ext)
60 except (DistutilsExecError,
61 DistutilsPlatformError, ValueError):
62 raise BuildFailed()
63
64
65 here = pathlib.Path(__file__).parent
66
67 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
68 try:
69 version = re.findall(r"^__version__ = '([^']+)'\r?$",
70 txt, re.M)[0]
71 except IndexError:
72 raise RuntimeError('Unable to determine version.')
73
74 install_requires = [
75 'attrs>=17.3.0',
76 'chardet>=2.0,<4.0',
77 'multidict>=4.0,<5.0',
78 'async_timeout>=3.0,<4.0',
79 'yarl>=1.0,<2.0',
80 'idna-ssl>=1.0; python_version<"3.7"',
81 ]
82
83
84 def read(f):
85 return (here / f).read_text('utf-8').strip()
86
87
88 NEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)
89 pytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []
90
91 tests_require = ['pytest', 'gunicorn',
92 'pytest-timeout', 'async-generator']
93
94
95 args = dict(
96 name='aiohttp',
97 version=version,
98 description='Async http client/server framework (asyncio)',
99 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
100 classifiers=[
101 'License :: OSI Approved :: Apache Software License',
102 'Intended Audience :: Developers',
103 'Programming Language :: Python',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.5',
106 'Programming Language :: Python :: 3.6',
107 'Programming Language :: Python :: 3.7',
108 'Development Status :: 5 - Production/Stable',
109 'Operating System :: POSIX',
110 'Operating System :: MacOS :: MacOS X',
111 'Operating System :: Microsoft :: Windows',
112 'Topic :: Internet :: WWW/HTTP',
113 'Framework :: AsyncIO',
114 ],
115 author='Nikolay Kim',
116 author_email='[email protected]',
117 maintainer=', '.join(('Nikolay Kim <[email protected]>',
118 'Andrew Svetlov <[email protected]>')),
119 maintainer_email='[email protected]',
120 url='https://github.com/aio-libs/aiohttp',
121 project_urls={
122 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
123 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
124 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
125 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
126 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
127 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
128 'Docs: RTD': 'https://docs.aiohttp.org',
129 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
130 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
131 },
132 license='Apache 2',
133 packages=['aiohttp'],
134 python_requires='>=3.5.3',
135 install_requires=install_requires,
136 tests_require=tests_require,
137 setup_requires=pytest_runner,
138 include_package_data=True,
139 ext_modules=extensions,
140 cmdclass=dict(build_ext=ve_build_ext),
141 )
142
143 try:
144 setup(**args)
145 except BuildFailed:
146 print("************************************************************")
147 print("Cannot compile C accelerator module, use pure python version")
148 print("************************************************************")
149 del args['ext_modules']
150 del args['cmdclass']
151 setup(**args)
152
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,7 @@
if sys.version_info < (3, 5, 3):
raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
+here = pathlib.Path(__file__).parent
try:
from Cython.Build import cythonize
@@ -19,6 +20,20 @@
except ImportError:
USE_CYTHON = False
+if (here / '.git').exists() and not USE_CYTHON:
+ print("Install cython when building from git clone", file=sys.stderr)
+ print("Hint:", file=sys.stderr)
+ print(" pip install cython", file=sys.stderr)
+ sys.exit(1)
+
+
+if (here / '.git').exists() and not (here / 'vendor/http-parser/README.md'):
+ print("Install submodules when building from git clone", file=sys.stderr)
+ print("Hint:", file=sys.stderr)
+ print(" git submodule update --init", file=sys.stderr)
+ sys.exit(2)
+
+
ext = '.pyx' if USE_CYTHON else '.c'
@@ -62,7 +77,6 @@
raise BuildFailed()
-here = pathlib.Path(__file__).parent
txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
try:
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,6 +12,7 @@\n if sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n \n+here = pathlib.Path(__file__).parent\n \n try:\n from Cython.Build import cythonize\n@@ -19,6 +20,20 @@\n except ImportError:\n USE_CYTHON = False\n \n+if (here / '.git').exists() and not USE_CYTHON:\n+ print(\"Install cython when building from git clone\", file=sys.stderr)\n+ print(\"Hint:\", file=sys.stderr)\n+ print(\" pip install cython\", file=sys.stderr)\n+ sys.exit(1)\n+\n+\n+if (here / '.git').exists() and not (here / 'vendor/http-parser/README.md'):\n+ print(\"Install submodules when building from git clone\", file=sys.stderr)\n+ print(\"Hint:\", file=sys.stderr)\n+ print(\" git submodule update --init\", file=sys.stderr)\n+ sys.exit(2)\n+\n+\n ext = '.pyx' if USE_CYTHON else '.c'\n \n \n@@ -62,7 +77,6 @@\n raise BuildFailed()\n \n \n-here = pathlib.Path(__file__).parent\n \n txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\n try:\n", "issue": "Build/install broken without cython\n## Long story short\r\n\r\nLatest master cannot be installed without cython.\r\n```\r\nclang: error: no such file or directory: 'aiohttp/_websocket.c'\r\n```\r\n\r\n## Expected behaviour\r\n\r\n```\r\npip install git+http://github.com/aio-libs/aiohttp\r\n```\r\nShould install aiohttp even without cython. It worked at least in previous versions.\r\n\r\n## Actual behaviour\r\n\r\nBuild failed:\r\n```\r\n running build_ext\r\n building 'aiohttp._websocket' extension\r\n creating build/temp.macosx-10.13-x86_64-3.7\r\n creating build/temp.macosx-10.13-x86_64-3.7/aiohttp\r\n gcc -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -I/Users/tosha/.pythonz/pythons/CPython-3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m -c aiohttp/_websocket.c -o build/temp.macosx-10.13-x86_64-3.7/aiohttp/_websocket.o\r\n clang: error: no such file or directory: 'aiohttp/_websocket.c'\r\n clang: error: no input files\r\n error: command 'gcc' failed with exit status 1\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nRun\r\n```\r\npip install git+http://github.com/aio-libs/aiohttp\r\n```\r\nin fresh empty venv.\r\n\r\n## Your environment\r\n\r\nmacOS 10.13.6/Python 3.7/empty vevn\r\nI am sure that the problem is OS/Python version independent.\nBuild/install broken without cython\n## Long story short\r\n\r\nLatest master cannot be installed without cython.\r\n```\r\nclang: error: no such file or directory: 'aiohttp/_websocket.c'\r\n```\r\n\r\n## Expected behaviour\r\n\r\n```\r\npip install git+http://github.com/aio-libs/aiohttp\r\n```\r\nShould install aiohttp even without cython. 
It worked at least in previous versions.\r\n\r\n## Actual behaviour\r\n\r\nBuild failed:\r\n```\r\n running build_ext\r\n building 'aiohttp._websocket' extension\r\n creating build/temp.macosx-10.13-x86_64-3.7\r\n creating build/temp.macosx-10.13-x86_64-3.7/aiohttp\r\n gcc -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -I/Users/tosha/.pythonz/pythons/CPython-3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m -c aiohttp/_websocket.c -o build/temp.macosx-10.13-x86_64-3.7/aiohttp/_websocket.o\r\n clang: error: no such file or directory: 'aiohttp/_websocket.c'\r\n clang: error: no input files\r\n error: command 'gcc' failed with exit status 1\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nRun\r\n```\r\npip install git+http://github.com/aio-libs/aiohttp\r\n```\r\nin fresh empty venv.\r\n\r\n## Your environment\r\n\r\nmacOS 10.13.6/Python 3.7/empty vevn\r\nI am sure that the problem is OS/Python version independent.\n", "before_files": [{"content": "import codecs\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\n\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket' + ext]),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser' + ext,\n 'vendor/http-parser/http_parser.c',\n 'aiohttp/_find_header.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist' + ext]),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers' + ext]),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer' + ext])]\n\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\nhere = pathlib.Path(__file__).parent\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\ninstall_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n 'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n]\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nNEEDS_PYTEST = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest-runner'] if NEEDS_PYTEST else []\n\ntests_require = ['pytest', 'gunicorn',\n 'pytest-timeout', 'async-generator']\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python 
:: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n tests_require=tests_require,\n setup_requires=pytest_runner,\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n)\n\ntry:\n setup(**args)\nexcept BuildFailed:\n print(\"************************************************************\")\n print(\"Cannot compile C accelerator module, use pure python version\")\n print(\"************************************************************\")\n del args['ext_modules']\n del args['cmdclass']\n setup(**args)\n", "path": "setup.py"}]}
| 2,882 | 319 |
| gh_patches_debug_3534 | rasdani/github-patches | git_diff | pytorch__ignite-1462 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
favicon for documentation
## 🚀 Feature
There should be a favicon for the Ignite documentation; currently it uses the PyTorch favicon
cc: @vfdev-5
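
For reference, Sphinx already exposes a standard setting for this; a minimal sketch is shown below (the icon path is a placeholder, not the actual file in the repo):

```python
# docs/source/conf.py -- point Sphinx at an Ignite-specific icon.
html_favicon = "_static/img/favicon.ico"  # placeholder path, chosen for illustration
```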
</issue>
<code>
[start of docs/source/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/stable/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 import os
16 import sys
17
18 sys.path.insert(0, os.path.abspath("../.."))
19 import ignite
20 import pytorch_sphinx_theme
21
22 # -- Project information -----------------------------------------------------
23
24 project = "ignite"
25 copyright = "2020, PyTorch-Ignite Contributors"
26 author = "PyTorch-Ignite Contributors"
27
28 # The short X.Y version
29 try:
30 version = os.environ["code_version"]
31 if "master" in version:
32 version = "master (" + ignite.__version__ + ")"
33 else:
34 version = version.replace("v", "")
35 except KeyError:
36 version = ignite.__version__
37
38 # The full version, including alpha/beta/rc tags
39 release = "master"
40
41
42 # -- General configuration ---------------------------------------------------
43
44 # If your documentation needs a minimal Sphinx version, state it here.
45 #
46 # needs_sphinx = '1.0'
47
48 # Add any Sphinx extension module names here, as strings. They can be
49 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
50 # ones.
51 extensions = [
52 "sphinx.ext.autosummary",
53 "sphinx.ext.doctest",
54 "sphinx.ext.intersphinx",
55 "sphinx.ext.todo",
56 "sphinx.ext.coverage",
57 "sphinx.ext.mathjax",
58 "sphinx.ext.napoleon",
59 "sphinx.ext.viewcode",
60 "sphinx.ext.autosectionlabel",
61 ]
62
63 # Add any paths that contain templates here, relative to this directory.
64 templates_path = ["_templates"]
65
66 # The suffix(es) of source filenames.
67 # You can specify multiple suffix as a list of string:
68 #
69 # source_suffix = ['.rst', '.md']
70 source_suffix = ".rst"
71
72 # The master toctree document.
73 master_doc = "index"
74
75 # The language for content autogenerated by Sphinx. Refer to documentation
76 # for a list of supported languages.
77 #
78 # This is also used if you do content translation via gettext catalogs.
79 # Usually you set "language" from the command line for these cases.
80 language = None
81
82 # List of patterns, relative to source directory, that match files and
83 # directories to ignore when looking for source files.
84 # This pattern also affects html_static_path and html_extra_path .
85 exclude_patterns = []
86
87 # The name of the Pygments (syntax highlighting) style to use.
88 pygments_style = "sphinx"
89
90
91 # -- Options for HTML output -------------------------------------------------
92
93 # The theme to use for HTML and HTML Help pages. See the documentation for
94 # a list of builtin themes.
95 #
96 html_theme = "pytorch_sphinx_theme"
97 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
98
99 html_theme_options = {
100 "canonical_url": "https://pytorch.org/ignite/index.html",
101 "collapse_navigation": False,
102 "display_version": True,
103 "logo_only": True,
104 }
105
106 html_logo = "_static/img/ignite_logo.svg"
107
108 # Theme options are theme-specific and customize the look and feel of a theme
109 # further. For a list of options available for each theme, see the
110 # documentation.
111 #
112 # html_theme_options = {}
113
114 # Add any paths that contain custom static files (such as style sheets) here,
115 # relative to this directory. They are copied after the builtin static files,
116 # so a file named "default.css" will overwrite the builtin "default.css".
117 html_static_path = ["_static", "_templates/_static"]
118
119 html_context = {
120 "css_files": [
121 # 'https://fonts.googleapis.com/css?family=Lato',
122 # '_static/css/pytorch_theme.css'
123 "_static/css/ignite_theme.css"
124 ],
125 }
126
127
128 # -- Options for HTMLHelp output ---------------------------------------------
129
130 # Output file base name for HTML help builder.
131 htmlhelp_basename = "ignitedoc"
132
133
134 # -- Options for LaTeX output ------------------------------------------------
135
136 latex_elements = {
137 # The paper size ('letterpaper' or 'a4paper').
138 #
139 # 'papersize': 'letterpaper',
140 # The font size ('10pt', '11pt' or '12pt').
141 #
142 # 'pointsize': '10pt',
143 # Additional stuff for the LaTeX preamble.
144 #
145 # 'preamble': '',
146 # Latex figure (float) alignment
147 #
148 # 'figure_align': 'htbp',
149 }
150
151 # Grouping the document tree into LaTeX files. List of tuples
152 # (source start file, target name, title,
153 # author, documentclass [howto, manual, or own class]).
154 latex_documents = [
155 (master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"),
156 ]
157
158
159 # -- Options for manual page output ------------------------------------------
160
161 # One entry per manual page. List of tuples
162 # (source start file, name, description, authors, manual section).
163 man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)]
164
165
166 # -- Options for Texinfo output ----------------------------------------------
167
168 # Grouping the document tree into Texinfo files. List of tuples
169 # (source start file, target name, title, author,
170 # dir menu entry, description, category)
171 texinfo_documents = [
172 (
173 master_doc,
174 "ignite",
175 "ignite Documentation",
176 author,
177 "ignite",
178 "One line description of project.",
179 "Miscellaneous",
180 ),
181 ]
182
183
184 # -- Extension configuration -------------------------------------------------
185
186 # -- Options for intersphinx extension ---------------------------------------
187
188 # Example configuration for intersphinx: refer to the Python standard library.
189 intersphinx_mapping = {"https://docs.python.org/": None}
190
191 # -- Options for todo extension ----------------------------------------------
192
193 # If true, `todo` and `todoList` produce output, else they produce nothing.
194 todo_include_todos = True
195
196 # -- Type hints configs ------------------------------------------------------
197
198 autodoc_typehints = "signature"
199
200 # -- A patch that turns-off cross refs for type annotations ------------------
201
202 import sphinx.domains.python
203 from docutils import nodes
204 from sphinx import addnodes
205
206 # replaces pending_xref node with desc_type for type annotations
207 sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type("", nodes.Text(t))
208
209 # -- Autosummary patch to get list of a classes, funcs automatically ----------
210
211 from importlib import import_module
212 from inspect import getmembers, isclass, isfunction
213 import sphinx.ext.autosummary
214 from sphinx.ext.autosummary import Autosummary
215 from docutils.parsers.rst import directives
216 from docutils.statemachine import StringList
217
218
219 class BetterAutosummary(Autosummary):
220 """Autosummary with autolisting for modules.
221
222 By default it tries to import all public names (__all__),
223 otherwise import all classes and/or functions in a module.
224
225 Options:
226 - :autolist: option to get list of classes and functions from currentmodule.
227 - :autolist-classes: option to get list of classes from currentmodule.
228 - :autolist-functions: option to get list of functions from currentmodule.
229
230 Example Usage:
231
232 .. currentmodule:: ignite.metrics
233
234 .. autosummary::
235 :nosignatures:
236 :autolist:
237 """
238
239 # Add new option
240 _option_spec = Autosummary.option_spec.copy()
241 _option_spec.update(
242 {
243 "autolist": directives.unchanged,
244 "autolist-classes": directives.unchanged,
245 "autolist-functions": directives.unchanged,
246 }
247 )
248 option_spec = _option_spec
249
250 def run(self):
251 for auto in ("autolist", "autolist-classes", "autolist-functions"):
252 if auto in self.options:
253 # Get current module name
254 module_name = self.env.ref_context.get("py:module")
255 # Import module
256 module = import_module(module_name)
257
258 # Get public names (if possible)
259 try:
260 names = getattr(module, "__all__")
261 except AttributeError:
262 # Get classes defined in the module
263 cls_names = [
264 name[0]
265 for name in getmembers(module, isclass)
266 if name[-1].__module__ == module_name and not (name[0].startswith("_"))
267 ]
268 # Get functions defined in the module
269 fn_names = [
270 name[0]
271 for name in getmembers(module, isfunction)
272 if (name[-1].__module__ == module_name) and not (name[0].startswith("_"))
273 ]
274 names = cls_names + fn_names
275 # It may happen that module doesn't have any defined class or func
276 if not names:
277 names = [name[0] for name in getmembers(module)]
278
279 # Filter out members w/o doc strings
280 names = [name for name in names if getattr(module, name).__doc__ is not None]
281
282 if auto == "autolist":
283 # Get list of all classes and functions inside module
284 names = [
285 name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))
286 ]
287 else:
288 if auto == "autolist-classes":
289 # Get only classes
290 check = isclass
291 elif auto == "autolist-functions":
292 # Get only functions
293 check = isfunction
294 else:
295 raise NotImplementedError
296
297 names = [name for name in names if check(getattr(module, name))]
298
299 # Update content
300 self.content = StringList(names)
301 return super().run()
302
303
304 # Patch original Autosummary
305 sphinx.ext.autosummary.Autosummary = BetterAutosummary
306
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -105,6 +105,8 @@
html_logo = "_static/img/ignite_logo.svg"
+html_favicon = "_templates/_static/img/ignite_logomark.svg"
+
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -105,6 +105,8 @@\n \n html_logo = \"_static/img/ignite_logo.svg\"\n \n+html_favicon = \"_templates/_static/img/ignite_logomark.svg\"\n+\n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n # documentation.\n", "issue": "favicon for documentation\n## \ud83d\ude80 Feature\r\n\r\nThere shall be a favicon for Ignite documentation, currently it's pytorch favicon\r\n\r\ncc: @vfdev-5 \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite_logo.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Type hints configs ------------------------------------------------------\n\nautodoc_typehints = \"signature\"\n\n# -- A patch that turns-off cross refs for type annotations ------------------\n\nimport sphinx.domains.python\nfrom docutils import nodes\nfrom sphinx import addnodes\n\n# replaces pending_xref node with desc_type for type annotations\nsphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(\"\", nodes.Text(t))\n\n# -- Autosummary patch to get list of a classes, funcs automatically ----------\n\nfrom importlib import import_module\nfrom inspect import getmembers, isclass, isfunction\nimport sphinx.ext.autosummary\nfrom sphinx.ext.autosummary import Autosummary\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import StringList\n\n\nclass BetterAutosummary(Autosummary):\n \"\"\"Autosummary with autolisting for modules.\n\n By default it tries to import all public names (__all__),\n otherwise import all classes and/or functions in a module.\n\n Options:\n - :autolist: option to get list of classes and functions from currentmodule.\n - :autolist-classes: option to get list of classes from currentmodule.\n - :autolist-functions: option to get list of functions from currentmodule.\n\n Example Usage:\n\n .. currentmodule:: ignite.metrics\n\n .. 
autosummary::\n :nosignatures:\n :autolist:\n \"\"\"\n\n # Add new option\n _option_spec = Autosummary.option_spec.copy()\n _option_spec.update(\n {\n \"autolist\": directives.unchanged,\n \"autolist-classes\": directives.unchanged,\n \"autolist-functions\": directives.unchanged,\n }\n )\n option_spec = _option_spec\n\n def run(self):\n for auto in (\"autolist\", \"autolist-classes\", \"autolist-functions\"):\n if auto in self.options:\n # Get current module name\n module_name = self.env.ref_context.get(\"py:module\")\n # Import module\n module = import_module(module_name)\n\n # Get public names (if possible)\n try:\n names = getattr(module, \"__all__\")\n except AttributeError:\n # Get classes defined in the module\n cls_names = [\n name[0]\n for name in getmembers(module, isclass)\n if name[-1].__module__ == module_name and not (name[0].startswith(\"_\"))\n ]\n # Get functions defined in the module\n fn_names = [\n name[0]\n for name in getmembers(module, isfunction)\n if (name[-1].__module__ == module_name) and not (name[0].startswith(\"_\"))\n ]\n names = cls_names + fn_names\n # It may happen that module doesn't have any defined class or func\n if not names:\n names = [name[0] for name in getmembers(module)]\n\n # Filter out members w/o doc strings\n names = [name for name in names if getattr(module, name).__doc__ is not None]\n\n if auto == \"autolist\":\n # Get list of all classes and functions inside module\n names = [\n name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))\n ]\n else:\n if auto == \"autolist-classes\":\n # Get only classes\n check = isclass\n elif auto == \"autolist-functions\":\n # Get only functions\n check = isfunction\n else:\n raise NotImplementedError\n\n names = [name for name in names if check(getattr(module, name))]\n\n # Update content\n self.content = StringList(names)\n return super().run()\n\n\n# Patch original Autosummary\nsphinx.ext.autosummary.Autosummary = BetterAutosummary\n", "path": "docs/source/conf.py"}]}
| 3,555 | 109 |
| gh_patches_debug_448 | rasdani/github-patches | git_diff | tensorflow__addons-1770 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable multiprocessing when testing with GPU and support distributed strategies in the tests.
**Describe the feature and the current behavior/state.**
Here I'm not going to discuss the bazel case as it's much more complicated to handle, and we currently advertise using pytest anyway to run the tests. We can of course make sure everything is compatible though.
This revamping of gpu testing has multiple objectives:
* The tests should behave the same whether the contributor has a gpu or not, meaning we shouldn't run all the tests on a gpu just because a gpu is available; otherwise it hurts reproducibility.
* The test suite should be able to run with multiple workers in kokoro or when a user has multiple gpus. Pytest should use all gpus visible to the main process.
* We need to support testing with distributed strategies. Currently it doesn't work. A fix has been started in #1209 but we need to update it for pytest.
* Making the whole thing simple to use and to maintain. Notably, we would get rid of this file: https://github.com/tensorflow/addons/blob/master/tools/testing/parallel_gpu_execute.sh which is quite hard to work on.
To do all that, here is my proposal:
Stuff to know:
* Pytest-xdist uses multiprocessing to run the tests, not multithreading.
* 2 environment variables are available in each of the workers to identify them. https://github.com/pytest-dev/pytest-xdist#identifying-the-worker-process-during-a-test
### Test workers
Suppose we have a machine with 10 CPUs and 4 GPUs: 10 processes will start to run the test suite. Workers 0 to 3 will each own one GPU (we can use CUDA_VISIBLE_DEVICES to enforce that, but I'm not even sure that's needed with the proposed implementation). Workers 4 to 9 will have no gpu available.
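
As a rough sketch of that worker-to-GPU mapping (assuming the 4-GPU machine above; `PYTEST_XDIST_WORKER` comes from the pytest-xdist docs linked earlier, while the function name and GPU count are placeholders):

```python
import os

def _pin_worker_to_gpu(num_gpus=4):
    # pytest-xdist names workers "gw0", "gw1", ...; the variable is absent without -n.
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker_id = int(worker.lstrip("gw") or 0)
    if worker_id < num_gpus:
        # Workers 0..3 each own one GPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(worker_id)
    else:
        # Workers 4..9 run CPU-only.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
```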
### Virtual devices
Each of those processes, when starting, will split its physical device into 2 virtual devices. Tests that just need to run on gpu will use the first of those virtual devices. Processes which need to test distributed strategies will use both of them. We assume here that 2 virtual devices are enough to test distributed strategies.
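
A minimal sketch of that split, assuming TensorFlow 2.x and an arbitrary per-device memory limit (not the final implementation):

```python
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    # Split the worker's single physical GPU into two virtual devices:
    # "GPU:0" for plain gpu tests, "GPU:0" + "GPU:1" for distributed-strategy tests.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [
            tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),
            tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),
        ],
    )
```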
### Impact on the contributors:
For this whole machinery to work, we need to know which test needs to run on CPU, GPU, or in distributed strategies. To do that we'll use pytest markers: `@pytest.mark.....`
* By default, if no marker is found, the test will run on CPU: `with device("CPU:0")`. It's equivalent to
`@pytest.mark.run_on(["cpu"])`.
* To run with gpu only: `@pytest.mark.run_on(["gpu"])`.
* To run on the cpu and gpu: `@pytest.mark.run_on(["cpu", "gpu"])` (test runs twice)
* To run within a distributed strategy: `@pytest.mark.run_on(["distributed strategy"])` (runs once here).
* To run with everything `@pytest.mark.run_on(["cpu", "gpu", "distributed strategy"])`
* To make crazy stuff, and not run the test in any device scope: `@pytest.mark.no_device_scope`. Then the contributor can do whatever he/she wants in the test.
Of course, if no gpu is available, we just skip the tests needing a distributed strategy or the gpu. Contributors who handle the devices manually have to make sure to skip the test manually if the gpu is used.
Since gpus are often the scarcest resource (nb gpus << nb cpus), tests needing the gpu will also be marked with `@pytest.mark.tryfirst` to ensure that we don't have worker starvation at the end (to get maximum speed).
To implement that, we first need to convert all tests to pytest (as opposed to unittest); it's currently 80% done, and thanks a lot @autoih for putting a LOT of work into that.
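
To make the contributor-facing side concrete, here is how a test file might look under this proposal (hypothetical usage — `run_on` and `no_device_scope` are the proposed markers, not an existing pytest API, and the device scoping is assumed to happen in shared fixtures):

```python
import pytest
import tensorflow as tf

@pytest.mark.run_on(["cpu", "gpu"])
def test_runs_on_cpu_and_gpu():
    # The test machinery is expected to wrap this body in the right
    # tf.device(...) scope, so the test itself stays device-agnostic.
    assert tf.add(1, 2).numpy() == 3

@pytest.mark.no_device_scope
def test_handles_devices_manually():
    # Contributors managing devices themselves skip manually when no GPU is present.
    if not tf.config.list_physical_devices("GPU"):
        pytest.skip("needs a GPU")
    with tf.device("GPU:0"):
        assert tf.reduce_sum(tf.ones((2, 2))).numpy() == 4.0
```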
**Relevant information**
- Are you willing to contribute it (yes/no): yes
- Are you willing to maintain it going forward? (yes/no): yes
- Is there a relevant academic paper? (if so, where): no
- Is there already an implementation in another framework? (if so, where): no
- Was it part of tf.contrib? (if so, where): no
**Which API type would this fall under (layer, metric, optimizer, etc.)**
Testing
**Who will benefit with this feature?**
Contributors with a gpu, and CI.
**Any other info.**
I believe that the implementation will first go in tensorflow addons because we have 4 GPUs available in the CI. Later on when it's stable we can split it from tensorflow addons and make it a separate pytest plugin with a public API.
Comments welcome, especially from @Squadrick, @hyang0129, @seanpmorgan, since I'm not a ninja of tf.device.
</issue>
<code>
[start of tensorflow_addons/conftest.py]
1 from tensorflow_addons.utils.test_utils import ( # noqa: F401
2 maybe_run_functions_eagerly,
3 pytest_make_parametrize_id,
4 data_format,
5 set_seeds,
6 pytest_addoption,
7 set_global_variables,
8 pytest_configure,
9 device,
10 pytest_generate_tests,
11 )
12
13 # fixtures present in this file will be available
14 # when running tests and can be referenced with strings
15 # https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
16
[end of tensorflow_addons/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py
--- a/tensorflow_addons/conftest.py
+++ b/tensorflow_addons/conftest.py
@@ -8,6 +8,7 @@
pytest_configure,
device,
pytest_generate_tests,
+ pytest_collection_modifyitems,
)
# fixtures present in this file will be available
|
{"golden_diff": "diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py\n--- a/tensorflow_addons/conftest.py\n+++ b/tensorflow_addons/conftest.py\n@@ -8,6 +8,7 @@\n pytest_configure,\n device,\n pytest_generate_tests,\n+ pytest_collection_modifyitems,\n )\n \n # fixtures present in this file will be available\n", "issue": "Enable multiprocessing when testing with GPU and support distributed strategies in the tests.\n**Describe the feature and the current behavior/state.**\r\n\r\nHere I'm not going to discuss the bazel case as it's much more complicated to handle, and we currently advertize using pytest anyway to run the tests. We can of course make sure everything is compatible though.\r\n\r\nThis revamping of gpu testing has multiple objectives:\r\n\r\n* The tests should behave the same weither the contributor has a gpu or not. Meaning we shouldn't run all the tests on a gpu just because a gpu is available, otherwise it hurts reproducibility.\r\n* The test suite should be able to run with multiple workers in kokoro or when a user has multiple gpus. Pytest should use all gpus visible by the main process.\r\n* We need to support testing with distributed strategies. Currently it doesn't work. A fix has been started in #1209 but we need to update it for pytest. \r\n* Making the whole thing simple to use and to maintain. Notably, we would get rid of this file: https://github.com/tensorflow/addons/blob/master/tools/testing/parallel_gpu_execute.sh which is quite hard to work on.\r\n\r\n\r\nTo do all that, here is my proposal:\r\n\r\nStuff to know: \r\n* Pytest-xdist uses multiprocessing to run the tests, not multithreading. \r\n* 2 environement variables are available in each of the workers to identify them. https://github.com/pytest-dev/pytest-xdist#identifying-the-worker-process-during-a-test\r\n\r\n\r\n### Test workers\r\nSuppose we have a machine with 10CPUs and 4 GPUs, 10 processes will start to run the test suite. Workers 0 to 3 will have ownership of one GPU each (we can use CUDA_VISIBLE_DEVICES to enforce that, but I'm not even sure that's needed with the proposed implementation). Workers 4 to 9 will have no gpu available.\r\n\r\n### Virtual devices\r\nEach of those processes, when starting, will split their physical device into 2 virtual device. Tests that just need to run on gpu will use the first of those virtual devices. Processes which need to test distributed strategies will use the two of them. We assume here that 2 virtual devices are enough to test distributed strategies.\r\n\r\n### Impact on the contributors:\r\nFor this whole machinery to work, we need to know which test needs to run on CPU, GPU, or in distributed strategies. To do that we'll use pytest markers: `@pytest.mark.....`\r\n\r\n* By default, if no marker is found, the test will run on CPU: `with device(\"CPU:0\")`. It's equivalent to \r\n`@pytest.mark.run_on([\"cpu\"])`.\r\n* To run with gpu only: `@pytest.mark.run_on([\"gpu\"])`.\r\n* To run on the cpu and gpu: `@pytest.mark.run_on([\"cpu\", \"gpu\"])` (test runs twice)\r\n* To run in within a distributed strategy `@pytest.mark.run_on([\"distributed strategy\"])`. (runs once here).\r\n* To run with everything `@pytest.mark.run_on([\"cpu\", \"gpu\", \"distributed strategy\"])`\r\n* To make crazy stuff, and not run the test in any device scope: `@pytest.mark.no_device_scope`. Then the contributor can do whatever he/she wants in the test.\r\n\r\nOf course, if no gpu is available, we just skip the tests needing a distribution strategy or the gpu. 
Contributors who handle the devices manually have to make sure to skip manually the test if the gpu is used.\r\n\r\nSince gpu are often the scarsest ressource (nb gpus << nb cpus), tests needing the gpu will also be marked with `@pytest.mark.tryfirst` to ensure that we don't have workers starvation at the end (to get maximum speed).\r\n\r\nTo implement that, we need first to convert all tests to pytest (as opposed to unittest) it's currently 80% done and thanks a lot @autoih for putting a LOT of work into that.\r\n\r\n\r\n\r\n**Relevant information**\r\n- Are you willing to contribute it (yes/no): yes\r\n- Are you willing to maintain it going forward? (yes/no): yes\r\n- Is there a relevant academic paper? (if so, where): no\r\n- Is there already an implementation in another framework? (if so, where): no\r\n- Was it part of tf.contrib? (if so, where): no\r\n\r\n**Which API type would this fall under (layer, metric, optimizer, etc.)**\r\n\r\nTesting\r\n\r\n**Who will benefit with this feature?**\r\n\r\nContributors with gpu, CI.\r\n\r\n**Any other info.**\r\n\r\nI believe that the implementation will first go in tensorflow addons because we have 4 GPUs available in the CI. Later on when it's stable we can split it from tensorflow addons and make it a separate pytest plugin with a public API.\r\n\r\nComments welcome. Especially from @Squadrick , @hyang0129 , @seanpmorgan since I'm not a ninja of tf.device.\n", "before_files": [{"content": "from tensorflow_addons.utils.test_utils import ( # noqa: F401\n maybe_run_functions_eagerly,\n pytest_make_parametrize_id,\n data_format,\n set_seeds,\n pytest_addoption,\n set_global_variables,\n pytest_configure,\n device,\n pytest_generate_tests,\n)\n\n# fixtures present in this file will be available\n# when running tests and can be referenced with strings\n# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions\n", "path": "tensorflow_addons/conftest.py"}]}
| 1,710 | 89 |
| gh_patches_debug_29057 | rasdani/github-patches | git_diff | vega__altair-2355 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow Paths in save()
Instead of allowing only string paths or file-likes, allow pathlib.Paths to be passed to `save()`.
Are these two the only places that would have to be changed?
https://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L8
https://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L72
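
For illustration, the kind of call this change is meant to support (a sketch only; the data and file name are arbitrary):

```python
import pathlib
import altair as alt
import pandas as pd

chart = (
    alt.Chart(pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}))
    .mark_point()
    .encode(x="x", y="y")
)
# Today this requires str(path) or an open file object; the request is to
# accept the pathlib.Path directly.
chart.save(pathlib.Path("figures") / "chart.html")
```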
</issue>
<code>
[start of altair/utils/save.py]
1 import json
2
3 from .mimebundle import spec_to_mimebundle
4
5
6 def write_file_or_filename(fp, content, mode="w"):
7 """Write content to fp, whether fp is a string or a file-like object"""
8 if isinstance(fp, str):
9 with open(fp, mode) as f:
10 f.write(content)
11 else:
12 fp.write(content)
13
14
15 def save(
16 chart,
17 fp,
18 vega_version,
19 vegaembed_version,
20 format=None,
21 mode=None,
22 vegalite_version=None,
23 embed_options=None,
24 json_kwds=None,
25 webdriver="chrome",
26 scale_factor=1,
27 **kwargs,
28 ):
29 """Save a chart to file in a variety of formats
30
31 Supported formats are [json, html, png, svg]
32
33 Parameters
34 ----------
35 chart : alt.Chart
36 the chart instance to save
37 fp : string filename or file-like object
38 file in which to write the chart.
39 format : string (optional)
40 the format to write: one of ['json', 'html', 'png', 'svg'].
41 If not specified, the format will be determined from the filename.
42 mode : string (optional)
43 Either 'vega' or 'vegalite'. If not specified, then infer the mode from
44 the '$schema' property of the spec, or the ``opt`` dictionary.
45 If it's not specified in either of those places, then use 'vegalite'.
46 vega_version : string
47 For html output, the version of vega.js to use
48 vegalite_version : string
49 For html output, the version of vegalite.js to use
50 vegaembed_version : string
51 For html output, the version of vegaembed.js to use
52 embed_options : dict
53 The vegaEmbed options dictionary. Default is {}
54 (See https://github.com/vega/vega-embed for details)
55 json_kwds : dict
56 Additional keyword arguments are passed to the output method
57 associated with the specified format.
58 webdriver : string {'chrome' | 'firefox'}
59 Webdriver to use for png or svg output
60 scale_factor : float
61 scale_factor to use to change size/resolution of png or svg output
62 **kwargs :
63 additional kwargs passed to spec_to_mimebundle.
64 """
65 if json_kwds is None:
66 json_kwds = {}
67
68 if embed_options is None:
69 embed_options = {}
70
71 if format is None:
72 if isinstance(fp, str):
73 format = fp.split(".")[-1]
74 else:
75 raise ValueError(
76 "must specify file format: " "['png', 'svg', 'pdf', 'html', 'json']"
77 )
78
79 spec = chart.to_dict()
80
81 if mode is None:
82 if "mode" in embed_options:
83 mode = embed_options["mode"]
84 elif "$schema" in spec:
85 mode = spec["$schema"].split("/")[-2]
86 else:
87 mode = "vega-lite"
88
89 if mode not in ["vega", "vega-lite"]:
90 raise ValueError("mode must be 'vega' or 'vega-lite', " "not '{}'".format(mode))
91
92 if mode == "vega-lite" and vegalite_version is None:
93 raise ValueError("must specify vega-lite version")
94
95 if format == "json":
96 json_spec = json.dumps(spec, **json_kwds)
97 write_file_or_filename(fp, json_spec, mode="w")
98 elif format == "html":
99 mimebundle = spec_to_mimebundle(
100 spec=spec,
101 format=format,
102 mode=mode,
103 vega_version=vega_version,
104 vegalite_version=vegalite_version,
105 vegaembed_version=vegaembed_version,
106 embed_options=embed_options,
107 json_kwds=json_kwds,
108 **kwargs,
109 )
110 write_file_or_filename(fp, mimebundle["text/html"], mode="w")
111 elif format in ["png", "svg", "pdf"]:
112 mimebundle = spec_to_mimebundle(
113 spec=spec,
114 format=format,
115 mode=mode,
116 vega_version=vega_version,
117 vegalite_version=vegalite_version,
118 vegaembed_version=vegaembed_version,
119 webdriver=webdriver,
120 scale_factor=scale_factor,
121 **kwargs,
122 )
123 if format == "png":
124 write_file_or_filename(fp, mimebundle["image/png"], mode="wb")
125 elif format == "pdf":
126 write_file_or_filename(fp, mimebundle["application/pdf"], mode="wb")
127 else:
128 write_file_or_filename(fp, mimebundle["image/svg+xml"], mode="w")
129 else:
130 raise ValueError("unrecognized format: '{}'".format(format))
131
[end of altair/utils/save.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/altair/utils/save.py b/altair/utils/save.py
--- a/altair/utils/save.py
+++ b/altair/utils/save.py
@@ -1,11 +1,13 @@
import json
+import pathlib
from .mimebundle import spec_to_mimebundle
def write_file_or_filename(fp, content, mode="w"):
- """Write content to fp, whether fp is a string or a file-like object"""
- if isinstance(fp, str):
+ """Write content to fp, whether fp is a string, a pathlib Path or a
+ file-like object"""
+ if isinstance(fp, str) or isinstance(fp, pathlib.PurePath):
with open(fp, mode) as f:
f.write(content)
else:
@@ -34,8 +36,8 @@
----------
chart : alt.Chart
the chart instance to save
- fp : string filename or file-like object
- file in which to write the chart.
+ fp : string filename, pathlib.Path or file-like object
+ file to which to write the chart.
format : string (optional)
the format to write: one of ['json', 'html', 'png', 'svg'].
If not specified, the format will be determined from the filename.
@@ -71,6 +73,8 @@
if format is None:
if isinstance(fp, str):
format = fp.split(".")[-1]
+ elif isinstance(fp, pathlib.PurePath):
+ format = fp.suffix.lstrip(".")
else:
raise ValueError(
"must specify file format: " "['png', 'svg', 'pdf', 'html', 'json']"
|
{"golden_diff": "diff --git a/altair/utils/save.py b/altair/utils/save.py\n--- a/altair/utils/save.py\n+++ b/altair/utils/save.py\n@@ -1,11 +1,13 @@\n import json\n+import pathlib\n \n from .mimebundle import spec_to_mimebundle\n \n \n def write_file_or_filename(fp, content, mode=\"w\"):\n- \"\"\"Write content to fp, whether fp is a string or a file-like object\"\"\"\n- if isinstance(fp, str):\n+ \"\"\"Write content to fp, whether fp is a string, a pathlib Path or a\n+ file-like object\"\"\"\n+ if isinstance(fp, str) or isinstance(fp, pathlib.PurePath):\n with open(fp, mode) as f:\n f.write(content)\n else:\n@@ -34,8 +36,8 @@\n ----------\n chart : alt.Chart\n the chart instance to save\n- fp : string filename or file-like object\n- file in which to write the chart.\n+ fp : string filename, pathlib.Path or file-like object\n+ file to which to write the chart.\n format : string (optional)\n the format to write: one of ['json', 'html', 'png', 'svg'].\n If not specified, the format will be determined from the filename.\n@@ -71,6 +73,8 @@\n if format is None:\n if isinstance(fp, str):\n format = fp.split(\".\")[-1]\n+ elif isinstance(fp, pathlib.PurePath):\n+ format = fp.suffix.lstrip(\".\")\n else:\n raise ValueError(\n \"must specify file format: \" \"['png', 'svg', 'pdf', 'html', 'json']\"\n", "issue": "Allow Paths in save()\nInstead of allowing only string paths or file-likes, allow pathlib.Paths to be passed to `save()`.\r\n\r\nAre these two the only places that would have to be changed?\r\n\r\nhttps://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L8\r\nhttps://github.com/altair-viz/altair/blob/54e03d403c1cec9ce2f2e8b14dc3d936c6686128/altair/utils/save.py#L72\n", "before_files": [{"content": "import json\n\nfrom .mimebundle import spec_to_mimebundle\n\n\ndef write_file_or_filename(fp, content, mode=\"w\"):\n \"\"\"Write content to fp, whether fp is a string or a file-like object\"\"\"\n if isinstance(fp, str):\n with open(fp, mode) as f:\n f.write(content)\n else:\n fp.write(content)\n\n\ndef save(\n chart,\n fp,\n vega_version,\n vegaembed_version,\n format=None,\n mode=None,\n vegalite_version=None,\n embed_options=None,\n json_kwds=None,\n webdriver=\"chrome\",\n scale_factor=1,\n **kwargs,\n):\n \"\"\"Save a chart to file in a variety of formats\n\n Supported formats are [json, html, png, svg]\n\n Parameters\n ----------\n chart : alt.Chart\n the chart instance to save\n fp : string filename or file-like object\n file in which to write the chart.\n format : string (optional)\n the format to write: one of ['json', 'html', 'png', 'svg'].\n If not specified, the format will be determined from the filename.\n mode : string (optional)\n Either 'vega' or 'vegalite'. If not specified, then infer the mode from\n the '$schema' property of the spec, or the ``opt`` dictionary.\n If it's not specified in either of those places, then use 'vegalite'.\n vega_version : string\n For html output, the version of vega.js to use\n vegalite_version : string\n For html output, the version of vegalite.js to use\n vegaembed_version : string\n For html output, the version of vegaembed.js to use\n embed_options : dict\n The vegaEmbed options dictionary. 
Default is {}\n (See https://github.com/vega/vega-embed for details)\n json_kwds : dict\n Additional keyword arguments are passed to the output method\n associated with the specified format.\n webdriver : string {'chrome' | 'firefox'}\n Webdriver to use for png or svg output\n scale_factor : float\n scale_factor to use to change size/resolution of png or svg output\n **kwargs :\n additional kwargs passed to spec_to_mimebundle.\n \"\"\"\n if json_kwds is None:\n json_kwds = {}\n\n if embed_options is None:\n embed_options = {}\n\n if format is None:\n if isinstance(fp, str):\n format = fp.split(\".\")[-1]\n else:\n raise ValueError(\n \"must specify file format: \" \"['png', 'svg', 'pdf', 'html', 'json']\"\n )\n\n spec = chart.to_dict()\n\n if mode is None:\n if \"mode\" in embed_options:\n mode = embed_options[\"mode\"]\n elif \"$schema\" in spec:\n mode = spec[\"$schema\"].split(\"/\")[-2]\n else:\n mode = \"vega-lite\"\n\n if mode not in [\"vega\", \"vega-lite\"]:\n raise ValueError(\"mode must be 'vega' or 'vega-lite', \" \"not '{}'\".format(mode))\n\n if mode == \"vega-lite\" and vegalite_version is None:\n raise ValueError(\"must specify vega-lite version\")\n\n if format == \"json\":\n json_spec = json.dumps(spec, **json_kwds)\n write_file_or_filename(fp, json_spec, mode=\"w\")\n elif format == \"html\":\n mimebundle = spec_to_mimebundle(\n spec=spec,\n format=format,\n mode=mode,\n vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version,\n embed_options=embed_options,\n json_kwds=json_kwds,\n **kwargs,\n )\n write_file_or_filename(fp, mimebundle[\"text/html\"], mode=\"w\")\n elif format in [\"png\", \"svg\", \"pdf\"]:\n mimebundle = spec_to_mimebundle(\n spec=spec,\n format=format,\n mode=mode,\n vega_version=vega_version,\n vegalite_version=vegalite_version,\n vegaembed_version=vegaembed_version,\n webdriver=webdriver,\n scale_factor=scale_factor,\n **kwargs,\n )\n if format == \"png\":\n write_file_or_filename(fp, mimebundle[\"image/png\"], mode=\"wb\")\n elif format == \"pdf\":\n write_file_or_filename(fp, mimebundle[\"application/pdf\"], mode=\"wb\")\n else:\n write_file_or_filename(fp, mimebundle[\"image/svg+xml\"], mode=\"w\")\n else:\n raise ValueError(\"unrecognized format: '{}'\".format(format))\n", "path": "altair/utils/save.py"}]}
| 2,002 | 369 |
gh_patches_debug_18663
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-3410
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ApolloTracingExtension` raises deprecation warning.
## Describe the Bug
[`datetime.utcnow` is deprecated](https://docs.python.org/3/library/datetime.html#datetime.datetime.utcnow) as of Python 3.12.
```python
.venv/lib/python3.12/site-packages/strawberry/extensions/tracing/apollo.py:89: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
self.start_time = datetime.utcnow()
/.venv/lib/python3.12/site-packages/strawberry/extensions/tracing/apollo.py:92: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
self.end_time = datetime.utcnow()
```
## System Information
- Strawberry version (if applicable): 0.220
</issue>
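A minimal sketch of the usual replacement, assuming only the `DATETIME_FORMAT` string shown in `apollo.py`:

```python
from datetime import datetime, timezone

DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"

# Deprecated since Python 3.12 (returns a naive datetime):
#   self.start_time = datetime.utcnow()
# Timezone-aware replacement with the same wall-clock value in UTC:
start_time = datetime.now(timezone.utc)

# DATETIME_FORMAT has no %z/%Z directive, so the rendered string keeps the
# exact shape the Apollo tracing payload expects.
print(start_time.strftime(DATETIME_FORMAT))
```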
<code>
[start of strawberry/extensions/tracing/apollo.py]
1 from __future__ import annotations
2
3 import dataclasses
4 import time
5 from datetime import datetime
6 from inspect import isawaitable
7 from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional
8
9 from strawberry.extensions import SchemaExtension
10 from strawberry.extensions.utils import get_path_from_info
11
12 from .utils import should_skip_tracing
13
14 if TYPE_CHECKING:
15 from graphql import GraphQLResolveInfo
16
17 DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
18
19 if TYPE_CHECKING:
20 from strawberry.types.execution import ExecutionContext
21
22
23 @dataclasses.dataclass
24 class ApolloStepStats:
25 start_offset: int
26 duration: int
27
28 def to_json(self) -> Dict[str, Any]:
29 return {"startOffset": self.start_offset, "duration": self.duration}
30
31
32 @dataclasses.dataclass
33 class ApolloResolverStats:
34 path: List[str]
35 parent_type: Any
36 field_name: str
37 return_type: Any
38 start_offset: int
39 duration: Optional[int] = None
40
41 def to_json(self) -> Dict[str, Any]:
42 return {
43 "path": self.path,
44 "field_name": self.field_name,
45 "parentType": str(self.parent_type),
46 "returnType": str(self.return_type),
47 "startOffset": self.start_offset,
48 "duration": self.duration,
49 }
50
51
52 @dataclasses.dataclass
53 class ApolloExecutionStats:
54 resolvers: List[ApolloResolverStats]
55
56 def to_json(self) -> Dict[str, Any]:
57 return {"resolvers": [resolver.to_json() for resolver in self.resolvers]}
58
59
60 @dataclasses.dataclass
61 class ApolloTracingStats:
62 start_time: datetime
63 end_time: datetime
64 duration: int
65 execution: ApolloExecutionStats
66 validation: ApolloStepStats
67 parsing: ApolloStepStats
68 version: int = 1
69
70 def to_json(self) -> Dict[str, Any]:
71 return {
72 "version": self.version,
73 "startTime": self.start_time.strftime(DATETIME_FORMAT),
74 "endTime": self.end_time.strftime(DATETIME_FORMAT),
75 "duration": self.duration,
76 "execution": self.execution.to_json(),
77 "validation": self.validation.to_json(),
78 "parsing": self.parsing.to_json(),
79 }
80
81
82 class ApolloTracingExtension(SchemaExtension):
83 def __init__(self, execution_context: ExecutionContext):
84 self._resolver_stats: List[ApolloResolverStats] = []
85 self.execution_context = execution_context
86
87 def on_operation(self) -> Generator[None, None, None]:
88 self.start_timestamp = self.now()
89 self.start_time = datetime.utcnow()
90 yield
91 self.end_timestamp = self.now()
92 self.end_time = datetime.utcnow()
93
94 def on_parse(self) -> Generator[None, None, None]:
95 self._start_parsing = self.now()
96 yield
97 self._end_parsing = self.now()
98
99 def on_validate(self) -> Generator[None, None, None]:
100 self._start_validation = self.now()
101 yield
102 self._end_validation = self.now()
103
104 def now(self) -> int:
105 return time.perf_counter_ns()
106
107 @property
108 def stats(self) -> ApolloTracingStats:
109 return ApolloTracingStats(
110 start_time=self.start_time,
111 end_time=self.end_time,
112 duration=self.end_timestamp - self.start_timestamp,
113 execution=ApolloExecutionStats(self._resolver_stats),
114 validation=ApolloStepStats(
115 start_offset=self._start_validation - self.start_timestamp,
116 duration=self._end_validation - self._start_validation,
117 ),
118 parsing=ApolloStepStats(
119 start_offset=self._start_parsing - self.start_timestamp,
120 duration=self._end_parsing - self._start_parsing,
121 ),
122 )
123
124 def get_results(self) -> Dict[str, Dict[str, Any]]:
125 return {"tracing": self.stats.to_json()}
126
127 async def resolve(
128 self,
129 _next: Callable,
130 root: Any,
131 info: GraphQLResolveInfo,
132 *args: str,
133 **kwargs: Any,
134 ) -> Any:
135 if should_skip_tracing(_next, info):
136 result = _next(root, info, *args, **kwargs)
137
138 if isawaitable(result):
139 result = await result # pragma: no cover
140
141 return result
142
143 start_timestamp = self.now()
144
145 resolver_stats = ApolloResolverStats(
146 path=get_path_from_info(info),
147 field_name=info.field_name,
148 parent_type=info.parent_type,
149 return_type=info.return_type,
150 start_offset=start_timestamp - self.start_timestamp,
151 )
152
153 try:
154 result = _next(root, info, *args, **kwargs)
155
156 if isawaitable(result):
157 result = await result
158
159 return result
160 finally:
161 end_timestamp = self.now()
162 resolver_stats.duration = end_timestamp - start_timestamp
163 self._resolver_stats.append(resolver_stats)
164
165
166 class ApolloTracingExtensionSync(ApolloTracingExtension):
167 def resolve(
168 self,
169 _next: Callable,
170 root: Any,
171 info: GraphQLResolveInfo,
172 *args: str,
173 **kwargs: Any,
174 ) -> Any:
175 if should_skip_tracing(_next, info):
176 return _next(root, info, *args, **kwargs)
177
178 start_timestamp = self.now()
179
180 resolver_stats = ApolloResolverStats(
181 path=get_path_from_info(info),
182 field_name=info.field_name,
183 parent_type=info.parent_type,
184 return_type=info.return_type,
185 start_offset=start_timestamp - self.start_timestamp,
186 )
187
188 try:
189 return _next(root, info, *args, **kwargs)
190 finally:
191 end_timestamp = self.now()
192 resolver_stats.duration = end_timestamp - start_timestamp
193 self._resolver_stats.append(resolver_stats)
194
[end of strawberry/extensions/tracing/apollo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/extensions/tracing/apollo.py b/strawberry/extensions/tracing/apollo.py
--- a/strawberry/extensions/tracing/apollo.py
+++ b/strawberry/extensions/tracing/apollo.py
@@ -2,7 +2,7 @@
import dataclasses
import time
-from datetime import datetime
+from datetime import datetime, timezone
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional
@@ -86,10 +86,10 @@
def on_operation(self) -> Generator[None, None, None]:
self.start_timestamp = self.now()
- self.start_time = datetime.utcnow()
+ self.start_time = datetime.now(timezone.utc)
yield
self.end_timestamp = self.now()
- self.end_time = datetime.utcnow()
+ self.end_time = datetime.now(timezone.utc)
def on_parse(self) -> Generator[None, None, None]:
self._start_parsing = self.now()
|
{"golden_diff": "diff --git a/strawberry/extensions/tracing/apollo.py b/strawberry/extensions/tracing/apollo.py\n--- a/strawberry/extensions/tracing/apollo.py\n+++ b/strawberry/extensions/tracing/apollo.py\n@@ -2,7 +2,7 @@\n \n import dataclasses\n import time\n-from datetime import datetime\n+from datetime import datetime, timezone\n from inspect import isawaitable\n from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional\n \n@@ -86,10 +86,10 @@\n \n def on_operation(self) -> Generator[None, None, None]:\n self.start_timestamp = self.now()\n- self.start_time = datetime.utcnow()\n+ self.start_time = datetime.now(timezone.utc)\n yield\n self.end_timestamp = self.now()\n- self.end_time = datetime.utcnow()\n+ self.end_time = datetime.now(timezone.utc)\n \n def on_parse(self) -> Generator[None, None, None]:\n self._start_parsing = self.now()\n", "issue": "`ApolloTracingExtension` raises deprecation warning.\n## Describe the Bug\r\n[`datetime.utcnow` is deprecated](https://docs.python.org/3/library/datetime.html#datetime.datetime.utcnow) as of Python 3.12.\r\n\r\n```python\r\n .venv/lib/python3.12/site-packages/strawberry/extensions/tracing/apollo.py:89: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n self.start_time = datetime.utcnow()\r\n\r\n /.venv/lib/python3.12/site-packages/strawberry/extensions/tracing/apollo.py:92: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n self.end_time = datetime.utcnow()\r\n```\r\n\r\n## System Information\r\n - Strawberry version (if applicable): 0.220\r\n\n\n<!-- POLAR PLEDGE BADGE START -->\n## Upvote & Fund\n\n- We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue.\n- We receive the funding once the issue is completed & confirmed by you.\n- Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/strawberry-graphql/strawberry/issues/3409\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3409/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3409/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nimport time\nfrom datetime import datetime\nfrom inspect import isawaitable\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional\n\nfrom strawberry.extensions import SchemaExtension\nfrom strawberry.extensions.utils import get_path_from_info\n\nfrom .utils import should_skip_tracing\n\nif TYPE_CHECKING:\n from graphql import GraphQLResolveInfo\n\nDATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nif TYPE_CHECKING:\n from strawberry.types.execution import ExecutionContext\n\n\[email protected]\nclass ApolloStepStats:\n start_offset: int\n duration: int\n\n def to_json(self) -> Dict[str, Any]:\n return {\"startOffset\": self.start_offset, \"duration\": self.duration}\n\n\[email protected]\nclass ApolloResolverStats:\n path: List[str]\n parent_type: Any\n field_name: str\n return_type: Any\n start_offset: int\n duration: Optional[int] = None\n\n def 
to_json(self) -> Dict[str, Any]:\n return {\n \"path\": self.path,\n \"field_name\": self.field_name,\n \"parentType\": str(self.parent_type),\n \"returnType\": str(self.return_type),\n \"startOffset\": self.start_offset,\n \"duration\": self.duration,\n }\n\n\[email protected]\nclass ApolloExecutionStats:\n resolvers: List[ApolloResolverStats]\n\n def to_json(self) -> Dict[str, Any]:\n return {\"resolvers\": [resolver.to_json() for resolver in self.resolvers]}\n\n\[email protected]\nclass ApolloTracingStats:\n start_time: datetime\n end_time: datetime\n duration: int\n execution: ApolloExecutionStats\n validation: ApolloStepStats\n parsing: ApolloStepStats\n version: int = 1\n\n def to_json(self) -> Dict[str, Any]:\n return {\n \"version\": self.version,\n \"startTime\": self.start_time.strftime(DATETIME_FORMAT),\n \"endTime\": self.end_time.strftime(DATETIME_FORMAT),\n \"duration\": self.duration,\n \"execution\": self.execution.to_json(),\n \"validation\": self.validation.to_json(),\n \"parsing\": self.parsing.to_json(),\n }\n\n\nclass ApolloTracingExtension(SchemaExtension):\n def __init__(self, execution_context: ExecutionContext):\n self._resolver_stats: List[ApolloResolverStats] = []\n self.execution_context = execution_context\n\n def on_operation(self) -> Generator[None, None, None]:\n self.start_timestamp = self.now()\n self.start_time = datetime.utcnow()\n yield\n self.end_timestamp = self.now()\n self.end_time = datetime.utcnow()\n\n def on_parse(self) -> Generator[None, None, None]:\n self._start_parsing = self.now()\n yield\n self._end_parsing = self.now()\n\n def on_validate(self) -> Generator[None, None, None]:\n self._start_validation = self.now()\n yield\n self._end_validation = self.now()\n\n def now(self) -> int:\n return time.perf_counter_ns()\n\n @property\n def stats(self) -> ApolloTracingStats:\n return ApolloTracingStats(\n start_time=self.start_time,\n end_time=self.end_time,\n duration=self.end_timestamp - self.start_timestamp,\n execution=ApolloExecutionStats(self._resolver_stats),\n validation=ApolloStepStats(\n start_offset=self._start_validation - self.start_timestamp,\n duration=self._end_validation - self._start_validation,\n ),\n parsing=ApolloStepStats(\n start_offset=self._start_parsing - self.start_timestamp,\n duration=self._end_parsing - self._start_parsing,\n ),\n )\n\n def get_results(self) -> Dict[str, Dict[str, Any]]:\n return {\"tracing\": self.stats.to_json()}\n\n async def resolve(\n self,\n _next: Callable,\n root: Any,\n info: GraphQLResolveInfo,\n *args: str,\n **kwargs: Any,\n ) -> Any:\n if should_skip_tracing(_next, info):\n result = _next(root, info, *args, **kwargs)\n\n if isawaitable(result):\n result = await result # pragma: no cover\n\n return result\n\n start_timestamp = self.now()\n\n resolver_stats = ApolloResolverStats(\n path=get_path_from_info(info),\n field_name=info.field_name,\n parent_type=info.parent_type,\n return_type=info.return_type,\n start_offset=start_timestamp - self.start_timestamp,\n )\n\n try:\n result = _next(root, info, *args, **kwargs)\n\n if isawaitable(result):\n result = await result\n\n return result\n finally:\n end_timestamp = self.now()\n resolver_stats.duration = end_timestamp - start_timestamp\n self._resolver_stats.append(resolver_stats)\n\n\nclass ApolloTracingExtensionSync(ApolloTracingExtension):\n def resolve(\n self,\n _next: Callable,\n root: Any,\n info: GraphQLResolveInfo,\n *args: str,\n **kwargs: Any,\n ) -> Any:\n if should_skip_tracing(_next, info):\n return _next(root, info, *args, 
**kwargs)\n\n start_timestamp = self.now()\n\n resolver_stats = ApolloResolverStats(\n path=get_path_from_info(info),\n field_name=info.field_name,\n parent_type=info.parent_type,\n return_type=info.return_type,\n start_offset=start_timestamp - self.start_timestamp,\n )\n\n try:\n return _next(root, info, *args, **kwargs)\n finally:\n end_timestamp = self.now()\n resolver_stats.duration = end_timestamp - start_timestamp\n self._resolver_stats.append(resolver_stats)\n", "path": "strawberry/extensions/tracing/apollo.py"}]}
| 2,694 | 226 |
gh_patches_debug_34570
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-2360
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tfx-oss sample test would fail when retrying the test
When I try '/test kubeflow-pipeline-sample-test' for flakiness tests, the parameterized_tfx_oss sample always fails because the retried runs write to the same directory.
See: https://prow.k8s.io/view/gcs/kubernetes-jenkins/pr-logs/pull/kubeflow_pipelines/2349/kubeflow-pipeline-sample-test/1182166563743076352
</issue>
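One way to keep retried runs from colliding is to give each invocation its own pipeline root; the sketch below is only an illustration (the bucket name is a placeholder, and the repository may solve this differently, for example by parameterizing the root at runtime):

```python
import os
import uuid

# Base location shared by all runs of the sample (placeholder bucket).
_base_root = os.path.join('gs://your-bucket', 'tfx_taxi_simple')

# Unique sub-directory per invocation, so a retried test never overwrites
# the artifacts produced by the previous attempt.
pipeline_root = os.path.join(_base_root, uuid.uuid4().hex)
```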
<code>
[start of samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py]
1 #!/usr/bin/env python3
2 # Copyright 2019 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import argparse
17 import os
18 import tensorflow as tf
19
20 from typing import Text
21
22 import kfp
23 from kfp import dsl
24 from tfx.components.evaluator.component import Evaluator
25 from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
26 from tfx.components.example_validator.component import ExampleValidator
27 from tfx.components.model_validator.component import ModelValidator
28 from tfx.components.pusher.component import Pusher
29 from tfx.components.schema_gen.component import SchemaGen
30 from tfx.components.statistics_gen.component import StatisticsGen
31 from tfx.components.trainer.component import Trainer
32 from tfx.components.transform.component import Transform
33 from tfx.orchestration import metadata
34 from tfx.orchestration import pipeline
35 from tfx.orchestration.kubeflow import kubeflow_dag_runner
36 from tfx.proto import evaluator_pb2
37 from tfx.utils.dsl_utils import csv_input
38 from tfx.proto import pusher_pb2
39 from tfx.proto import trainer_pb2
40 from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
41 from ml_metadata.proto import metadata_store_pb2
42 from tfx.orchestration.kubeflow.proto import kubeflow_pb2
43
44 # Define pipeline params used for pipeline execution.
45 # Path to the module file, should be a GCS path.
46 _taxi_module_file_param = dsl.PipelineParam(
47 name='module-file',
48 value='gs://ml-pipeline-playground/tfx_taxi_simple/modules/taxi_utils.py')
49
50 # Path to the CSV data file, under which there should be a data.csv file.
51 _data_root_param = dsl.PipelineParam(
52 name='data-root',
53 value='gs://ml-pipeline-playground/tfx_taxi_simple/data')
54
55 # Path of pipeline root, should be a GCS path.
56 _pipeline_root_param = dsl.PipelineParam(
57 name='pipeline-root',
58 value=os.path.join('gs://your-bucket', 'tfx_taxi_simple'))
59
60 def _create_test_pipeline(pipeline_root: Text, csv_input_location: Text,
61 taxi_module_file: Text, enable_cache: bool):
62 """Creates a simple Kubeflow-based Chicago Taxi TFX pipeline.
63
64 Args:
65 pipeline_name: The name of the pipeline.
66 pipeline_root: The root of the pipeline output.
67 csv_input_location: The location of the input data directory.
68 taxi_module_file: The location of the module file for Transform/Trainer.
69 enable_cache: Whether to enable cache or not.
70
71 Returns:
72 A logical TFX pipeline.Pipeline object.
73 """
74 examples = csv_input(csv_input_location)
75
76 example_gen = CsvExampleGen(input_base=examples)
77 statistics_gen = StatisticsGen(input_data=example_gen.outputs.examples)
78 infer_schema = SchemaGen(
79 stats=statistics_gen.outputs.output, infer_feature_shape=False)
80 validate_stats = ExampleValidator(
81 stats=statistics_gen.outputs.output, schema=infer_schema.outputs.output)
82 transform = Transform(
83 input_data=example_gen.outputs.examples,
84 schema=infer_schema.outputs.output,
85 module_file=taxi_module_file)
86 trainer = Trainer(
87 module_file=taxi_module_file,
88 transformed_examples=transform.outputs.transformed_examples,
89 schema=infer_schema.outputs.output,
90 transform_output=transform.outputs.transform_output,
91 train_args=trainer_pb2.TrainArgs(num_steps=10),
92 eval_args=trainer_pb2.EvalArgs(num_steps=5))
93 model_analyzer = Evaluator(
94 examples=example_gen.outputs.examples,
95 model_exports=trainer.outputs.output,
96 feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
97 evaluator_pb2.SingleSlicingSpec(
98 column_for_slicing=['trip_start_hour'])
99 ]))
100 model_validator = ModelValidator(
101 examples=example_gen.outputs.examples, model=trainer.outputs.output)
102 pusher = Pusher(
103 model_export=trainer.outputs.output,
104 model_blessing=model_validator.outputs.blessing,
105 push_destination=pusher_pb2.PushDestination(
106 filesystem=pusher_pb2.PushDestination.Filesystem(
107 base_directory=os.path.join(pipeline_root, 'model_serving'))))
108
109 return pipeline.Pipeline(
110 pipeline_name='parameterized_tfx_oss',
111 pipeline_root=pipeline_root,
112 components=[
113 example_gen, statistics_gen, infer_schema, validate_stats, transform,
114 trainer, model_analyzer, model_validator, pusher
115 ],
116 enable_cache=enable_cache,
117 )
118
119
120 def _get_kubeflow_metadata_config() -> kubeflow_pb2.KubeflowMetadataConfig:
121 config = kubeflow_pb2.KubeflowMetadataConfig()
122 config.mysql_db_service_host.environment_variable = 'MYSQL_SERVICE_HOST'
123 config.mysql_db_service_port.environment_variable = 'MYSQL_SERVICE_PORT'
124 config.mysql_db_name.value = 'metadb'
125 config.mysql_db_user.value = 'root'
126 config.mysql_db_password.value = ''
127 return config
128
129
130 if __name__ == '__main__':
131
132 enable_cache = True
133
134 pipeline = _create_test_pipeline(
135 str(_pipeline_root_param),
136 str(_data_root_param),
137 str(_taxi_module_file_param),
138 enable_cache=enable_cache)
139
140 config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
141 kubeflow_metadata_config=_get_kubeflow_metadata_config())
142
143 kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(config=config)
144 # Make sure kfp_runner recognizes those parameters.
145 kfp_runner._params.extend([_data_root_param, _taxi_module_file_param])
146
147 kfp_runner.run(pipeline)
[end of samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py b/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py
--- a/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py
+++ b/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py
@@ -53,9 +53,7 @@
value='gs://ml-pipeline-playground/tfx_taxi_simple/data')
# Path of pipeline root, should be a GCS path.
-_pipeline_root_param = dsl.PipelineParam(
- name='pipeline-root',
- value=os.path.join('gs://your-bucket', 'tfx_taxi_simple'))
+pipeline_root = os.path.join('gs://your-bucket', 'tfx_taxi_simple')
def _create_test_pipeline(pipeline_root: Text, csv_input_location: Text,
taxi_module_file: Text, enable_cache: bool):
@@ -99,12 +97,18 @@
]))
model_validator = ModelValidator(
examples=example_gen.outputs.examples, model=trainer.outputs.output)
+
+ # Hack: ensuring push_destination can be correctly parameterized and interpreted.
+ # pipeline root will be specified as a dsl.PipelineParam with the name
+ # pipeline-root, see:
+ # https://github.com/tensorflow/tfx/blob/1c670e92143c7856f67a866f721b8a9368ede385/tfx/orchestration/kubeflow/kubeflow_dag_runner.py#L226
+ _pipeline_root_param = dsl.PipelineParam(name='pipeline-root')
pusher = Pusher(
model_export=trainer.outputs.output,
model_blessing=model_validator.outputs.blessing,
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
- base_directory=os.path.join(pipeline_root, 'model_serving'))))
+ base_directory=os.path.join(str(_pipeline_root_param), 'model_serving'))))
return pipeline.Pipeline(
pipeline_name='parameterized_tfx_oss',
@@ -130,9 +134,8 @@
if __name__ == '__main__':
enable_cache = True
-
pipeline = _create_test_pipeline(
- str(_pipeline_root_param),
+ pipeline_root,
str(_data_root_param),
str(_taxi_module_file_param),
enable_cache=enable_cache)
|
{"golden_diff": "diff --git a/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py b/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py\n--- a/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py\n+++ b/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py\n@@ -53,9 +53,7 @@\n value='gs://ml-pipeline-playground/tfx_taxi_simple/data')\n \n # Path of pipeline root, should be a GCS path.\n-_pipeline_root_param = dsl.PipelineParam(\n- name='pipeline-root',\n- value=os.path.join('gs://your-bucket', 'tfx_taxi_simple'))\n+pipeline_root = os.path.join('gs://your-bucket', 'tfx_taxi_simple')\n \n def _create_test_pipeline(pipeline_root: Text, csv_input_location: Text,\n taxi_module_file: Text, enable_cache: bool):\n@@ -99,12 +97,18 @@\n ]))\n model_validator = ModelValidator(\n examples=example_gen.outputs.examples, model=trainer.outputs.output)\n+\n+ # Hack: ensuring push_destination can be correctly parameterized and interpreted.\n+ # pipeline root will be specified as a dsl.PipelineParam with the name\n+ # pipeline-root, see:\n+ # https://github.com/tensorflow/tfx/blob/1c670e92143c7856f67a866f721b8a9368ede385/tfx/orchestration/kubeflow/kubeflow_dag_runner.py#L226\n+ _pipeline_root_param = dsl.PipelineParam(name='pipeline-root')\n pusher = Pusher(\n model_export=trainer.outputs.output,\n model_blessing=model_validator.outputs.blessing,\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n- base_directory=os.path.join(pipeline_root, 'model_serving'))))\n+ base_directory=os.path.join(str(_pipeline_root_param), 'model_serving'))))\n \n return pipeline.Pipeline(\n pipeline_name='parameterized_tfx_oss',\n@@ -130,9 +134,8 @@\n if __name__ == '__main__':\n \n enable_cache = True\n-\n pipeline = _create_test_pipeline(\n- str(_pipeline_root_param),\n+ pipeline_root,\n str(_data_root_param),\n str(_taxi_module_file_param),\n enable_cache=enable_cache)\n", "issue": "tfx-oss sample test would fail when retry the test\nWhen I try '/test kubeflow-pipeline-sample-test' for flakiness tests, the parameterized_tfx_oss would always fail due to the fact that they are writing to the same directory.\r\nSee: https://prow.k8s.io/view/gcs/kubernetes-jenkins/pr-logs/pull/kubeflow_pipelines/2349/kubeflow-pipeline-sample-test/1182166563743076352\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport tensorflow as tf\n\nfrom typing import Text\n\nimport kfp\nfrom kfp import dsl\nfrom tfx.components.evaluator.component import Evaluator\nfrom tfx.components.example_gen.csv_example_gen.component import CsvExampleGen\nfrom tfx.components.example_validator.component import ExampleValidator\nfrom tfx.components.model_validator.component import ModelValidator\nfrom tfx.components.pusher.component import Pusher\nfrom tfx.components.schema_gen.component import SchemaGen\nfrom tfx.components.statistics_gen.component import 
StatisticsGen\nfrom tfx.components.trainer.component import Trainer\nfrom tfx.components.transform.component import Transform\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import evaluator_pb2\nfrom tfx.utils.dsl_utils import csv_input\nfrom tfx.proto import pusher_pb2\nfrom tfx.proto import trainer_pb2\nfrom tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor\nfrom ml_metadata.proto import metadata_store_pb2\nfrom tfx.orchestration.kubeflow.proto import kubeflow_pb2\n\n# Define pipeline params used for pipeline execution.\n# Path to the module file, should be a GCS path.\n_taxi_module_file_param = dsl.PipelineParam(\n name='module-file',\n value='gs://ml-pipeline-playground/tfx_taxi_simple/modules/taxi_utils.py')\n\n# Path to the CSV data file, under which their should be a data.csv file.\n_data_root_param = dsl.PipelineParam(\n name='data-root',\n value='gs://ml-pipeline-playground/tfx_taxi_simple/data')\n\n# Path of pipeline root, should be a GCS path.\n_pipeline_root_param = dsl.PipelineParam(\n name='pipeline-root',\n value=os.path.join('gs://your-bucket', 'tfx_taxi_simple'))\n\ndef _create_test_pipeline(pipeline_root: Text, csv_input_location: Text,\n taxi_module_file: Text, enable_cache: bool):\n \"\"\"Creates a simple Kubeflow-based Chicago Taxi TFX pipeline.\n\n Args:\n pipeline_name: The name of the pipeline.\n pipeline_root: The root of the pipeline output.\n csv_input_location: The location of the input data directory.\n taxi_module_file: The location of the module file for Transform/Trainer.\n enable_cache: Whether to enable cache or not.\n\n Returns:\n A logical TFX pipeline.Pipeline object.\n \"\"\"\n examples = csv_input(csv_input_location)\n\n example_gen = CsvExampleGen(input_base=examples)\n statistics_gen = StatisticsGen(input_data=example_gen.outputs.examples)\n infer_schema = SchemaGen(\n stats=statistics_gen.outputs.output, infer_feature_shape=False)\n validate_stats = ExampleValidator(\n stats=statistics_gen.outputs.output, schema=infer_schema.outputs.output)\n transform = Transform(\n input_data=example_gen.outputs.examples,\n schema=infer_schema.outputs.output,\n module_file=taxi_module_file)\n trainer = Trainer(\n module_file=taxi_module_file,\n transformed_examples=transform.outputs.transformed_examples,\n schema=infer_schema.outputs.output,\n transform_output=transform.outputs.transform_output,\n train_args=trainer_pb2.TrainArgs(num_steps=10),\n eval_args=trainer_pb2.EvalArgs(num_steps=5))\n model_analyzer = Evaluator(\n examples=example_gen.outputs.examples,\n model_exports=trainer.outputs.output,\n feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[\n evaluator_pb2.SingleSlicingSpec(\n column_for_slicing=['trip_start_hour'])\n ]))\n model_validator = ModelValidator(\n examples=example_gen.outputs.examples, model=trainer.outputs.output)\n pusher = Pusher(\n model_export=trainer.outputs.output,\n model_blessing=model_validator.outputs.blessing,\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.join(pipeline_root, 'model_serving'))))\n\n return pipeline.Pipeline(\n pipeline_name='parameterized_tfx_oss',\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_analyzer, model_validator, pusher\n ],\n enable_cache=enable_cache,\n )\n\n\ndef 
_get_kubeflow_metadata_config() -> kubeflow_pb2.KubeflowMetadataConfig:\n config = kubeflow_pb2.KubeflowMetadataConfig()\n config.mysql_db_service_host.environment_variable = 'MYSQL_SERVICE_HOST'\n config.mysql_db_service_port.environment_variable = 'MYSQL_SERVICE_PORT'\n config.mysql_db_name.value = 'metadb'\n config.mysql_db_user.value = 'root'\n config.mysql_db_password.value = ''\n return config\n\n\nif __name__ == '__main__':\n\n enable_cache = True\n\n pipeline = _create_test_pipeline(\n str(_pipeline_root_param),\n str(_data_root_param),\n str(_taxi_module_file_param),\n enable_cache=enable_cache)\n\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=_get_kubeflow_metadata_config())\n\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(config=config)\n # Make sure kfp_runner recognizes those parameters.\n kfp_runner._params.extend([_data_root_param, _taxi_module_file_param])\n\n kfp_runner.run(pipeline)", "path": "samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.py"}]}
| 2,356 | 572 |
gh_patches_debug_18410
|
rasdani/github-patches
|
git_diff
|
openai__openai-python-176
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[0.26.0] openai/cli.py:440: RuntimeWarning: coroutine 'FineTune.stream_events' was never awaited
```
bash$ openai api fine_tunes.create -t ./train_test_prepared.jsonl -m davinci
Upload progress: 100%|██████████████████████████████████████████████████████████████████████████████| 112/112 [00:00<00:00, 88.5kit/s]
Uploaded file from ./train_test_prepared.jsonl: file-ppEDNe0p6EomteEp3JFbBoFp
Created fine-tune: ft-u9KskmmvSnBtVc4VDfbe7lyr
Streaming events until fine-tuning is complete...
(Ctrl-C will interrupt the stream, but not cancel the fine-tune)
Stream interrupted (client disconnected).
To resume the stream, run:
openai api fine_tunes.follow -i ft-u9KskmmvSnBtVc4VDfbe7lyr
/usr/lib/python3.10/site-packages/openai/cli.py:406: RuntimeWarning: coroutine 'FineTune.stream_events' was never awaited
cls._stream_events(resp["id"])
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
```
it is working fine with 0.25.0
may be related: dev-python/aiohttp-3.8.3
</issue>
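A reduced sketch of the failure mode, using a stand-in coroutine rather than the real `FineTune.stream_events`:

```python
import asyncio


async def stream_events(fine_tune_id: str):
    # Stand-in for FineTune.stream_events in 0.26.0: it is `async def`, so a
    # bare call only builds a coroutine object and performs no request.
    return iter([{"id": fine_tune_id, "status": "pending"}])


def follow(fine_tune_id: str) -> None:
    # A synchronous caller such as cli.py must drive the coroutine through an
    # event loop; calling stream_events(...) without awaiting it is what
    # produces the "coroutine ... was never awaited" RuntimeWarning above.
    for event in asyncio.run(stream_events(fine_tune_id)):
        print(event)


follow("ft-u9KskmmvSnBtVc4VDfbe7lyr")
```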
<code>
[start of openai/api_resources/fine_tune.py]
1 from urllib.parse import quote_plus
2
3 from openai import api_requestor, util, error
4 from openai.api_resources.abstract import (
5 CreateableAPIResource,
6 ListableAPIResource,
7 nested_resource_class_methods,
8 )
9 from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
10 from openai.openai_response import OpenAIResponse
11 from openai.util import ApiType
12
13
14 @nested_resource_class_methods("event", operations=["list"])
15 class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
16 OBJECT_NAME = "fine-tunes"
17
18 @classmethod
19 def _prepare_cancel(
20 cls,
21 id,
22 api_key=None,
23 api_type=None,
24 request_id=None,
25 api_version=None,
26 **params,
27 ):
28 base = cls.class_url()
29 extn = quote_plus(id)
30
31 typed_api_type, api_version = cls._get_api_type_and_version(
32 api_type, api_version
33 )
34 if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
35 url = "/%s%s/%s/cancel?api-version=%s" % (
36 cls.azure_api_prefix,
37 base,
38 extn,
39 api_version,
40 )
41 elif typed_api_type == ApiType.OPEN_AI:
42 url = "%s/%s/cancel" % (base, extn)
43 else:
44 raise error.InvalidAPIType("Unsupported API type %s" % api_type)
45
46 instance = cls(id, api_key, **params)
47 return instance, url
48
49 @classmethod
50 def cancel(
51 cls,
52 id,
53 api_key=None,
54 api_type=None,
55 request_id=None,
56 api_version=None,
57 **params,
58 ):
59 instance, url = cls._prepare_cancel(
60 id,
61 api_key,
62 api_type,
63 request_id,
64 api_version,
65 **params,
66 )
67 return instance.request("post", url, request_id=request_id)
68
69 @classmethod
70 def acancel(
71 cls,
72 id,
73 api_key=None,
74 api_type=None,
75 request_id=None,
76 api_version=None,
77 **params,
78 ):
79 instance, url = cls._prepare_cancel(
80 id,
81 api_key,
82 api_type,
83 request_id,
84 api_version,
85 **params,
86 )
87 return instance.arequest("post", url, request_id=request_id)
88
89 @classmethod
90 def _prepare_stream_events(
91 cls,
92 id,
93 api_key=None,
94 api_base=None,
95 api_type=None,
96 request_id=None,
97 api_version=None,
98 organization=None,
99 **params,
100 ):
101 base = cls.class_url()
102 extn = quote_plus(id)
103
104 requestor = api_requestor.APIRequestor(
105 api_key,
106 api_base=api_base,
107 api_type=api_type,
108 api_version=api_version,
109 organization=organization,
110 )
111
112 typed_api_type, api_version = cls._get_api_type_and_version(
113 api_type, api_version
114 )
115
116 if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
117 url = "/%s%s/%s/events?stream=true&api-version=%s" % (
118 cls.azure_api_prefix,
119 base,
120 extn,
121 api_version,
122 )
123 elif typed_api_type == ApiType.OPEN_AI:
124 url = "%s/%s/events?stream=true" % (base, extn)
125 else:
126 raise error.InvalidAPIType("Unsupported API type %s" % api_type)
127
128 return requestor, url
129
130 @classmethod
131 async def stream_events(
132 cls,
133 id,
134 api_key=None,
135 api_base=None,
136 api_type=None,
137 request_id=None,
138 api_version=None,
139 organization=None,
140 **params,
141 ):
142 requestor, url = cls._prepare_stream_events(
143 id,
144 api_key,
145 api_base,
146 api_type,
147 request_id,
148 api_version,
149 organization,
150 **params,
151 )
152
153 response, _, api_key = await requestor.arequest(
154 "get", url, params, stream=True, request_id=request_id
155 )
156
157 assert not isinstance(response, OpenAIResponse) # must be an iterator
158 return (
159 util.convert_to_openai_object(
160 line,
161 api_key,
162 api_version,
163 organization,
164 )
165 for line in response
166 )
167
[end of openai/api_resources/fine_tune.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openai/api_resources/fine_tune.py b/openai/api_resources/fine_tune.py
--- a/openai/api_resources/fine_tune.py
+++ b/openai/api_resources/fine_tune.py
@@ -128,7 +128,45 @@
return requestor, url
@classmethod
- async def stream_events(
+ def stream_events(
+ cls,
+ id,
+ api_key=None,
+ api_base=None,
+ api_type=None,
+ request_id=None,
+ api_version=None,
+ organization=None,
+ **params,
+ ):
+ requestor, url = cls._prepare_stream_events(
+ id,
+ api_key,
+ api_base,
+ api_type,
+ request_id,
+ api_version,
+ organization,
+ **params,
+ )
+
+ response, _, api_key = requestor.request(
+ "get", url, params, stream=True, request_id=request_id
+ )
+
+ assert not isinstance(response, OpenAIResponse) # must be an iterator
+ return (
+ util.convert_to_openai_object(
+ line,
+ api_key,
+ api_version,
+ organization,
+ )
+ for line in response
+ )
+
+ @classmethod
+ async def astream_events(
cls,
id,
api_key=None,
|
{"golden_diff": "diff --git a/openai/api_resources/fine_tune.py b/openai/api_resources/fine_tune.py\n--- a/openai/api_resources/fine_tune.py\n+++ b/openai/api_resources/fine_tune.py\n@@ -128,7 +128,45 @@\n return requestor, url\n \n @classmethod\n- async def stream_events(\n+ def stream_events(\n+ cls,\n+ id,\n+ api_key=None,\n+ api_base=None,\n+ api_type=None,\n+ request_id=None,\n+ api_version=None,\n+ organization=None,\n+ **params,\n+ ):\n+ requestor, url = cls._prepare_stream_events(\n+ id,\n+ api_key,\n+ api_base,\n+ api_type,\n+ request_id,\n+ api_version,\n+ organization,\n+ **params,\n+ )\n+\n+ response, _, api_key = requestor.request(\n+ \"get\", url, params, stream=True, request_id=request_id\n+ )\n+\n+ assert not isinstance(response, OpenAIResponse) # must be an iterator\n+ return (\n+ util.convert_to_openai_object(\n+ line,\n+ api_key,\n+ api_version,\n+ organization,\n+ )\n+ for line in response\n+ )\n+\n+ @classmethod\n+ async def astream_events(\n cls,\n id,\n api_key=None,\n", "issue": "[0.26.0] openai/cli.py:440: RuntimeWarning: coroutine 'FineTune.stream_events' was never awaited\n```\r\nbash$ openai api fine_tunes.create -t ./train_test_prepared.jsonl -m davinci\r\nUpload progress: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 112/112 [00:00<00:00, 88.5kit/s]\r\nUploaded file from ./train_test_prepared.jsonl: file-ppEDNe0p6EomteEp3JFbBoFp\r\nCreated fine-tune: ft-u9KskmmvSnBtVc4VDfbe7lyr\r\nStreaming events until fine-tuning is complete...\r\n\r\n(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\r\n\r\nStream interrupted (client disconnected).\r\nTo resume the stream, run:\r\n\r\n openai api fine_tunes.follow -i ft-u9KskmmvSnBtVc4VDfbe7lyr\r\n\r\n/usr/lib/python3.10/site-packages/openai/cli.py:406: RuntimeWarning: coroutine 'FineTune.stream_events' was never awaited\r\n cls._stream_events(resp[\"id\"])\r\nRuntimeWarning: Enable tracemalloc to get the object allocation traceback\r\n\r\n```\r\n\r\nit is working fine with 0.25.0\r\n\r\nmay be related: dev-python/aiohttp-3.8.3\r\n\n", "before_files": [{"content": "from urllib.parse import quote_plus\n\nfrom openai import api_requestor, util, error\nfrom openai.api_resources.abstract import (\n CreateableAPIResource,\n ListableAPIResource,\n nested_resource_class_methods,\n)\nfrom openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource\nfrom openai.openai_response import OpenAIResponse\nfrom openai.util import ApiType\n\n\n@nested_resource_class_methods(\"event\", operations=[\"list\"])\nclass FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):\n OBJECT_NAME = \"fine-tunes\"\n\n @classmethod\n def _prepare_cancel(\n cls,\n id,\n api_key=None,\n api_type=None,\n request_id=None,\n api_version=None,\n **params,\n ):\n base = cls.class_url()\n extn = quote_plus(id)\n\n typed_api_type, api_version = cls._get_api_type_and_version(\n api_type, api_version\n )\n if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):\n url = \"/%s%s/%s/cancel?api-version=%s\" % (\n cls.azure_api_prefix,\n base,\n extn,\n api_version,\n )\n elif typed_api_type == 
ApiType.OPEN_AI:\n url = \"%s/%s/cancel\" % (base, extn)\n else:\n raise error.InvalidAPIType(\"Unsupported API type %s\" % api_type)\n\n instance = cls(id, api_key, **params)\n return instance, url\n\n @classmethod\n def cancel(\n cls,\n id,\n api_key=None,\n api_type=None,\n request_id=None,\n api_version=None,\n **params,\n ):\n instance, url = cls._prepare_cancel(\n id,\n api_key,\n api_type,\n request_id,\n api_version,\n **params,\n )\n return instance.request(\"post\", url, request_id=request_id)\n\n @classmethod\n def acancel(\n cls,\n id,\n api_key=None,\n api_type=None,\n request_id=None,\n api_version=None,\n **params,\n ):\n instance, url = cls._prepare_cancel(\n id,\n api_key,\n api_type,\n request_id,\n api_version,\n **params,\n )\n return instance.arequest(\"post\", url, request_id=request_id)\n\n @classmethod\n def _prepare_stream_events(\n cls,\n id,\n api_key=None,\n api_base=None,\n api_type=None,\n request_id=None,\n api_version=None,\n organization=None,\n **params,\n ):\n base = cls.class_url()\n extn = quote_plus(id)\n\n requestor = api_requestor.APIRequestor(\n api_key,\n api_base=api_base,\n api_type=api_type,\n api_version=api_version,\n organization=organization,\n )\n\n typed_api_type, api_version = cls._get_api_type_and_version(\n api_type, api_version\n )\n\n if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):\n url = \"/%s%s/%s/events?stream=true&api-version=%s\" % (\n cls.azure_api_prefix,\n base,\n extn,\n api_version,\n )\n elif typed_api_type == ApiType.OPEN_AI:\n url = \"%s/%s/events?stream=true\" % (base, extn)\n else:\n raise error.InvalidAPIType(\"Unsupported API type %s\" % api_type)\n\n return requestor, url\n\n @classmethod\n async def stream_events(\n cls,\n id,\n api_key=None,\n api_base=None,\n api_type=None,\n request_id=None,\n api_version=None,\n organization=None,\n **params,\n ):\n requestor, url = cls._prepare_stream_events(\n id,\n api_key,\n api_base,\n api_type,\n request_id,\n api_version,\n organization,\n **params,\n )\n\n response, _, api_key = await requestor.arequest(\n \"get\", url, params, stream=True, request_id=request_id\n )\n\n assert not isinstance(response, OpenAIResponse) # must be an iterator\n return (\n util.convert_to_openai_object(\n line,\n api_key,\n api_version,\n organization,\n )\n for line in response\n )\n", "path": "openai/api_resources/fine_tune.py"}]}
| 2,221 | 313 |
gh_patches_debug_19504
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-828
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide navigation page at the breadcrumbs
--
Why does https://www.shopelectro.ru/pages/navigation/ exist? Can we move the nested pages up one level?
From this doc:
https://docs.google.com/document/d/1y4xUQhQ4V_Ln5xwcEB2zMp7KTUQnkn9agKTTAU1z0-c/edit
</issue>
<code>
[start of shopelectro/management/commands/excel.py]
1 """
2 Generate Excel price-list.
3
4 Use this excel editor lib: https://openpyxl.readthedocs.io/en/stable/
5 """
6 import datetime
7 import os
8 from collections import namedtuple
9
10 import openpyxl
11 from django.conf import settings
12 from django.core.management.base import BaseCommand
13 from openpyxl.styles import borders, colors, Font
14
15 from shopelectro.models import Product, Category
16
17
18 class Command(BaseCommand):
19 TEMPLATE = 'templates/ecommerce/template.xlsx'
20 NAME = 'pricelist.xlsx'
21 SHEET_TITLE = 'Прайс Shopelectro'
22 CATEGORY_FILL = openpyxl.styles.PatternFill(
23 start_color='F4FEFD',
24 end_color='F4FEFD',
25 fill_type='solid'
26 )
27 BUY_FILL = openpyxl.styles.PatternFill(
28 start_color='FEFEF0',
29 end_color='FEFEF0',
30 fill_type='solid'
31 )
32 THIN_BORDER = borders.Border(
33 top=borders.Side(style='thin'),
34 right=borders.Side(style='thin'),
35 bottom=borders.Side(style='thin'),
36 left=borders.Side(style='thin')
37 )
38 CURRENT_ROW = '9' # Start of catalog section in file.
39 cell = namedtuple('cell', ['row', 'col'])
40 BAD_STYLED_CELLS = ['D5', 'E5', 'D6', 'G8']
41
42 def __init__(self, *args, **kwargs):
43 super(Command, self).__init__(*args, **kwargs)
44 self.file, self.sheet = self.load_file_and_sheet()
45
46 def handle(self, *args, **options):
47 """Open template's file and start proceeding it."""
48 self.set_collapse_controls()
49 self.fill_header()
50 self.write_catalog()
51 self.hide_formulas()
52 self.set_styles()
53 base_dir = settings.ASSETS_DIR
54 self.file.save(os.path.join(base_dir, self.NAME))
55
56 def set_styles(self):
57 for cell in self.BAD_STYLED_CELLS:
58 self.sheet[cell].border = self.THIN_BORDER
59
60 def set_collapse_controls(self):
61 """
62 Place collapse buttons above rows.
63
64 Collapse controls looks like this: http://prntscr.com/clf9xh. # Ignore InvalidLinkBear
65 Doc link: https://goo.gl/nR5pLO
66 """
67 self.sheet.sheet_properties.outlinePr.summaryBelow = False
68
69 def increase_row(self):
70 self.CURRENT_ROW = str(int(self.CURRENT_ROW) + 1)
71 return self.CURRENT_ROW
72
73 def get_row(self, row_number):
74 return self.sheet.row_dimensions[int(row_number)]
75
76 def load_file_and_sheet(self):
77 """
78 Load template file into openpyxl.
79
80 Return tuple with opened openpyxl file's object and active price sheet.
81 """
82 file = openpyxl.load_workbook(os.path.join(
83 settings.BASE_DIR, self.TEMPLATE))
84 return file, file.get_sheet_by_name('Прайслист')
85
86 def fill_header(self):
87 """Fill header of a sheet with date and title."""
88 date_cell = 'C5'
89 self.sheet.title = self.SHEET_TITLE
90 self.sheet[date_cell] = datetime.date.strftime(
91 datetime.date.today(), '%d.%m.%Y')
92
93 def hide_formulas(self):
94 """Hide formulas for calculating totals."""
95 self.sheet.column_dimensions.group('H', 'K', hidden=True, outline_level=0)
96
97 def write_catalog(self):
98 """Write categories and products to sheet."""
99 categories = Category.objects.all().order_by('name').filter(children=None)
100 for category in categories.iterator():
101 self.write_category_with_products(category)
102
103 def write_category_with_products(self, category):
104 """Write category line and beside that - all of products in this category."""
105 def hide_row(row):
106 row.hidden = True
107 row.outlineLevel = 1
108
109 def collapse_row(row):
110 row.collapsed = True
111
112 def write_product_rows():
113 """Write products lines."""
114 sheet = self.sheet
115 products = Product.objects.filter(category=category, page__is_active=True)
116 for product in products.iterator():
117 product_start = 'A' + self.CURRENT_ROW
118 sheet[product_start] = product.name
119 sheet[product_start].font = Font(color=colors.BLUE)
120 sheet[product_start].hyperlink = settings.BASE_URL + product.url
121 sheet[product_start].border = self.THIN_BORDER
122 prices = [
123 product.price,
124 product.wholesale_small,
125 product.wholesale_medium,
126 product.wholesale_large,
127 ]
128 for price, total in zip('CDEF', 'HIJK'):
129 sheet[price + self.CURRENT_ROW] = prices.pop(0)
130 sheet[total + self.CURRENT_ROW] = (
131 '={0}{1}*G{1}'.format(price, self.CURRENT_ROW)
132 )
133
134 sheet[price + self.CURRENT_ROW].border = self.THIN_BORDER
135
136 sheet['G' + self.CURRENT_ROW].fill = self.BUY_FILL
137 sheet['G' + self.CURRENT_ROW].border = self.THIN_BORDER
138
139 hide_row(self.get_row(self.CURRENT_ROW))
140 self.increase_row()
141
142 def write_category_row():
143 """Merge category line into one cell and write to it."""
144 sheet = self.sheet
145 collapse_row(self.get_row(self.CURRENT_ROW))
146
147 category_start = 'A' + self.CURRENT_ROW
148 category_line = '{}:{}'.format(
149 category_start, 'G' + self.CURRENT_ROW)
150 sheet.merge_cells(category_line)
151 sheet[category_start] = category.name
152 sheet[category_start].fill = self.CATEGORY_FILL
153
154 self.increase_row()
155
156 write_category_row()
157 write_product_rows()
158
[end of shopelectro/management/commands/excel.py]
[start of shopelectro/sitemaps.py]
1 from typing import Generator, Tuple
2
3 from django.contrib.sitemaps import Sitemap
4 from django.urls import reverse
5
6 from pages.models import CustomPage, Page, PageManager
7 from shopelectro.models import Category, Product, Tag
8
9
10 class AbstractSitemap(Sitemap):
11 protocol = 'https'
12 changefreq = 'weekly'
13 priority = 0.9
14
15
16 class IndexSitemap(Sitemap):
17 protocol = 'https'
18 changefreq = 'monthly'
19 priority = 1
20
21 # items()
22 # Required. A method that returns a list of objects.
23 # https://docs.djangoproject.com/ja/1.9/ref/contrib/sitemaps/#django.contrib.sitemaps.Sitemap.items
24 def items(self):
25 return [CustomPage.objects.get(slug='')]
26
27
28 class CategorySitemap(AbstractSitemap):
29
30 def items(self):
31 return Category.objects.filter(page__is_active=True)
32
33
34 def get_categories_with_tags() -> Generator[
35 Tuple[Category, Tag], None, None
36 ]:
37 """
38 Return all unique Category+TagGroup pairs.
39
40 Currently, tags per category is limited to 1 tag (by SEO requirements).
41 So, for each tags group in each category we'll get 1 tag.
42 """
43 for category in Category.objects.filter(page__is_active=True):
44 products = Product.objects.filter_descendants(category)
45 tags = Tag.objects.filter_by_products(products)
46 for group_name, group_tags in tags.group_tags().items():
47 for group_tag in group_tags:
48 yield category, group_tag
49
50
51 class CategoryWithTagsSitemap(AbstractSitemap):
52
53 def items(self):
54 # `items` method can't return generator (by django design)
55 # so we moved items collection code to dedicated function
56 return list(get_categories_with_tags())
57
58 def location(self, item):
59 category, tag = item
60 return reverse('category', kwargs={
61 'slug': category.page.slug,
62 'tags': tag.slug,
63 })
64
65
66 class ProductSitemap(AbstractSitemap):
67
68 def items(self):
69 return Product.objects.filter(page__is_active=True)
70
71
72 class PagesSitemap(AbstractSitemap):
73
74 def items(self):
75 assert(isinstance(Page.objects, PageManager))
76 return Page.objects.active()
77
[end of shopelectro/sitemaps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shopelectro/management/commands/excel.py b/shopelectro/management/commands/excel.py
--- a/shopelectro/management/commands/excel.py
+++ b/shopelectro/management/commands/excel.py
@@ -96,7 +96,7 @@
def write_catalog(self):
"""Write categories and products to sheet."""
- categories = Category.objects.all().order_by('name').filter(children=None)
+ categories = Category.objects.active().order_by('name').filter(children=None)
for category in categories.iterator():
self.write_category_with_products(category)
diff --git a/shopelectro/sitemaps.py b/shopelectro/sitemaps.py
--- a/shopelectro/sitemaps.py
+++ b/shopelectro/sitemaps.py
@@ -1,3 +1,4 @@
+from datetime import datetime
from typing import Generator, Tuple
from django.contrib.sitemaps import Sitemap
@@ -24,6 +25,9 @@
def items(self):
return [CustomPage.objects.get(slug='')]
+ def lastmod(self, _):
+ return datetime.now()
+
class CategorySitemap(AbstractSitemap):
|
{"golden_diff": "diff --git a/shopelectro/management/commands/excel.py b/shopelectro/management/commands/excel.py\n--- a/shopelectro/management/commands/excel.py\n+++ b/shopelectro/management/commands/excel.py\n@@ -96,7 +96,7 @@\n \n def write_catalog(self):\n \"\"\"Write categories and products to sheet.\"\"\"\n- categories = Category.objects.all().order_by('name').filter(children=None)\n+ categories = Category.objects.active().order_by('name').filter(children=None)\n for category in categories.iterator():\n self.write_category_with_products(category)\n \ndiff --git a/shopelectro/sitemaps.py b/shopelectro/sitemaps.py\n--- a/shopelectro/sitemaps.py\n+++ b/shopelectro/sitemaps.py\n@@ -1,3 +1,4 @@\n+from datetime import datetime\n from typing import Generator, Tuple\n \n from django.contrib.sitemaps import Sitemap\n@@ -24,6 +25,9 @@\n def items(self):\n return [CustomPage.objects.get(slug='')]\n \n+ def lastmod(self, _):\n+ return datetime.now()\n+\n \n class CategorySitemap(AbstractSitemap):\n", "issue": "Hide navigation page at the breadcrumbs\n--\r\n\r\nhttps://www.shopelectro.ru/pages/navigation/ \u0437\u0430\u0447\u0435\u043c \u043e\u043d\u043e ? \u043c\u043e\u0436\u0435\u043c \u0432\u043b\u043e\u0436\u0435\u043d\u043d\u044b\u0435 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u044b \u043f\u043e\u0434\u043d\u044f\u0442\u044c \u043d\u0430 \u0443\u0440\u043e\u0432\u0435\u043d\u044c \u0432\u044b\u0448\u0435 ? \r\n\r\nFrom this doc:\r\nhttps://docs.google.com/document/d/1y4xUQhQ4V_Ln5xwcEB2zMp7KTUQnkn9agKTTAU1z0-c/edit\r\n\n", "before_files": [{"content": "\"\"\"\nGenerate Excel price-list.\n\nUse this excel editor lib: https://openpyxl.readthedocs.io/en/stable/\n\"\"\"\nimport datetime\nimport os\nfrom collections import namedtuple\n\nimport openpyxl\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom openpyxl.styles import borders, colors, Font\n\nfrom shopelectro.models import Product, Category\n\n\nclass Command(BaseCommand):\n TEMPLATE = 'templates/ecommerce/template.xlsx'\n NAME = 'pricelist.xlsx'\n SHEET_TITLE = '\u041f\u0440\u0430\u0439\u0441 Shopelectro'\n CATEGORY_FILL = openpyxl.styles.PatternFill(\n start_color='F4FEFD',\n end_color='F4FEFD',\n fill_type='solid'\n )\n BUY_FILL = openpyxl.styles.PatternFill(\n start_color='FEFEF0',\n end_color='FEFEF0',\n fill_type='solid'\n )\n THIN_BORDER = borders.Border(\n top=borders.Side(style='thin'),\n right=borders.Side(style='thin'),\n bottom=borders.Side(style='thin'),\n left=borders.Side(style='thin')\n )\n CURRENT_ROW = '9' # Start of catalog section in file.\n cell = namedtuple('cell', ['row', 'col'])\n BAD_STYLED_CELLS = ['D5', 'E5', 'D6', 'G8']\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.file, self.sheet = self.load_file_and_sheet()\n\n def handle(self, *args, **options):\n \"\"\"Open template's file and start proceeding it.\"\"\"\n self.set_collapse_controls()\n self.fill_header()\n self.write_catalog()\n self.hide_formulas()\n self.set_styles()\n base_dir = settings.ASSETS_DIR\n self.file.save(os.path.join(base_dir, self.NAME))\n\n def set_styles(self):\n for cell in self.BAD_STYLED_CELLS:\n self.sheet[cell].border = self.THIN_BORDER\n\n def set_collapse_controls(self):\n \"\"\"\n Place collapse buttons above rows.\n\n Collapse controls looks like this: http://prntscr.com/clf9xh. 
# Ignore InvalidLinkBear\n Doc link: https://goo.gl/nR5pLO\n \"\"\"\n self.sheet.sheet_properties.outlinePr.summaryBelow = False\n\n def increase_row(self):\n self.CURRENT_ROW = str(int(self.CURRENT_ROW) + 1)\n return self.CURRENT_ROW\n\n def get_row(self, row_number):\n return self.sheet.row_dimensions[int(row_number)]\n\n def load_file_and_sheet(self):\n \"\"\"\n Load template file into openpyxl.\n\n Return tuple with opened openpyxl file's object and active price sheet.\n \"\"\"\n file = openpyxl.load_workbook(os.path.join(\n settings.BASE_DIR, self.TEMPLATE))\n return file, file.get_sheet_by_name('\u041f\u0440\u0430\u0439\u0441\u043b\u0438\u0441\u0442')\n\n def fill_header(self):\n \"\"\"Fill header of a sheet with date and title.\"\"\"\n date_cell = 'C5'\n self.sheet.title = self.SHEET_TITLE\n self.sheet[date_cell] = datetime.date.strftime(\n datetime.date.today(), '%d.%m.%Y')\n\n def hide_formulas(self):\n \"\"\"Hide formulas for calculating totals.\"\"\"\n self.sheet.column_dimensions.group('H', 'K', hidden=True, outline_level=0)\n\n def write_catalog(self):\n \"\"\"Write categories and products to sheet.\"\"\"\n categories = Category.objects.all().order_by('name').filter(children=None)\n for category in categories.iterator():\n self.write_category_with_products(category)\n\n def write_category_with_products(self, category):\n \"\"\"Write category line and beside that - all of products in this category.\"\"\"\n def hide_row(row):\n row.hidden = True\n row.outlineLevel = 1\n\n def collapse_row(row):\n row.collapsed = True\n\n def write_product_rows():\n \"\"\"Write products lines.\"\"\"\n sheet = self.sheet\n products = Product.objects.filter(category=category, page__is_active=True)\n for product in products.iterator():\n product_start = 'A' + self.CURRENT_ROW\n sheet[product_start] = product.name\n sheet[product_start].font = Font(color=colors.BLUE)\n sheet[product_start].hyperlink = settings.BASE_URL + product.url\n sheet[product_start].border = self.THIN_BORDER\n prices = [\n product.price,\n product.wholesale_small,\n product.wholesale_medium,\n product.wholesale_large,\n ]\n for price, total in zip('CDEF', 'HIJK'):\n sheet[price + self.CURRENT_ROW] = prices.pop(0)\n sheet[total + self.CURRENT_ROW] = (\n '={0}{1}*G{1}'.format(price, self.CURRENT_ROW)\n )\n\n sheet[price + self.CURRENT_ROW].border = self.THIN_BORDER\n\n sheet['G' + self.CURRENT_ROW].fill = self.BUY_FILL\n sheet['G' + self.CURRENT_ROW].border = self.THIN_BORDER\n\n hide_row(self.get_row(self.CURRENT_ROW))\n self.increase_row()\n\n def write_category_row():\n \"\"\"Merge category line into one cell and write to it.\"\"\"\n sheet = self.sheet\n collapse_row(self.get_row(self.CURRENT_ROW))\n\n category_start = 'A' + self.CURRENT_ROW\n category_line = '{}:{}'.format(\n category_start, 'G' + self.CURRENT_ROW)\n sheet.merge_cells(category_line)\n sheet[category_start] = category.name\n sheet[category_start].fill = self.CATEGORY_FILL\n\n self.increase_row()\n\n write_category_row()\n write_product_rows()\n", "path": "shopelectro/management/commands/excel.py"}, {"content": "from typing import Generator, Tuple\n\nfrom django.contrib.sitemaps import Sitemap\nfrom django.urls import reverse\n\nfrom pages.models import CustomPage, Page, PageManager\nfrom shopelectro.models import Category, Product, Tag\n\n\nclass AbstractSitemap(Sitemap):\n protocol = 'https'\n changefreq = 'weekly'\n priority = 0.9\n\n\nclass IndexSitemap(Sitemap):\n protocol = 'https'\n changefreq = 'monthly'\n priority = 1\n\n # items()\n # Required. 
A method that returns a list of objects.\n # https://docs.djangoproject.com/ja/1.9/ref/contrib/sitemaps/#django.contrib.sitemaps.Sitemap.items\n def items(self):\n return [CustomPage.objects.get(slug='')]\n\n\nclass CategorySitemap(AbstractSitemap):\n\n def items(self):\n return Category.objects.filter(page__is_active=True)\n\n\ndef get_categories_with_tags() -> Generator[\n Tuple[Category, Tag], None, None\n]:\n \"\"\"\n Return all unique Category+TagGroup pairs.\n\n Currently, tags per category is limited to 1 tag (by SEO requirements).\n So, for each tags group in each category we'll get 1 tag.\n \"\"\"\n for category in Category.objects.filter(page__is_active=True):\n products = Product.objects.filter_descendants(category)\n tags = Tag.objects.filter_by_products(products)\n for group_name, group_tags in tags.group_tags().items():\n for group_tag in group_tags:\n yield category, group_tag\n\n\nclass CategoryWithTagsSitemap(AbstractSitemap):\n\n def items(self):\n # `items` method can't return generator (by django design)\n # so we moved items collection code to dedicated function\n return list(get_categories_with_tags())\n\n def location(self, item):\n category, tag = item\n return reverse('category', kwargs={\n 'slug': category.page.slug,\n 'tags': tag.slug,\n })\n\n\nclass ProductSitemap(AbstractSitemap):\n\n def items(self):\n return Product.objects.filter(page__is_active=True)\n\n\nclass PagesSitemap(AbstractSitemap):\n\n def items(self):\n assert(isinstance(Page.objects, PageManager))\n return Page.objects.active()\n", "path": "shopelectro/sitemaps.py"}]}
| 2,897 | 264 |
gh_patches_debug_27439
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-1526
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add cross-environment testing to GitHub Actions
Instead of just testing `python:3.7`, we should test
* Multiple operating systems
* Multiple versions of Python
https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idruns-on
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import subprocess
18
19 from distutils.cmd import Command
20 from setuptools import find_packages
21
22 try:
23 from setuptools import setup
24 from setuptools.command.install import install
25 from setuptools.command.develop import develop
26 from setuptools.command.egg_info import egg_info
27 from setuptools.command.sdist import sdist
28 from setuptools.command.build_py import build_py
29 except ImportError:
30 from distutils.core import setup
31 from distutils.command.install import install
32 from distutils.command.build_py import build_py
33
34 NAME = "feast"
35 DESCRIPTION = "Python SDK for Feast"
36 URL = "https://github.com/feast-dev/feast"
37 AUTHOR = "Feast"
38 REQUIRES_PYTHON = ">=3.7.0"
39
40 REQUIRED = [
41 "Click==7.*",
42 "colorama>=0.3.9",
43 "fastavro>=0.22.11,<0.23",
44 "google-api-core>=1.23.0",
45 "googleapis-common-protos==1.52.*",
46 "grpcio>=1.32.0",
47 "Jinja2>=2.0.0",
48 "jsonschema",
49 "mmh3",
50 "numpy<1.20.0",
51 "pandas~=1.0.0",
52 "pandavro==1.5.*",
53 "protobuf>=3.10",
54 "pyarrow==2.0.0",
55 "pydantic>=1.0.0",
56 "PyYAML==5.3.*",
57 "tabulate==0.8.*",
58 "toml==0.10.*",
59 "tqdm==4.*",
60 ]
61
62 GCP_REQUIRED = [
63 "google-cloud-bigquery>=2.0.*",
64 "google-cloud-bigquery-storage >= 2.0.0",
65 "google-cloud-datastore>=2.1.*",
66 "google-cloud-storage>=1.20.*",
67 "google-cloud-core==1.4.*",
68 ]
69
70 CI_REQUIRED = [
71 "cryptography==3.3.2",
72 "flake8",
73 "black==19.10b0",
74 "isort>=5",
75 "grpcio-tools>=1.32.0",
76 "grpcio-testing>=1.32.0",
77 "mock==2.0.0",
78 "moto",
79 "mypy==0.790",
80 "mypy-protobuf==1.24",
81 "avro==1.10.0",
82 "gcsfs",
83 "urllib3>=1.25.4",
84 "pytest==6.0.0",
85 "pytest-lazy-fixture==0.6.3",
86 "pytest-timeout==1.4.2",
87 "pytest-ordering==0.6.*",
88 "pytest-mock==1.10.4",
89 "Sphinx",
90 "sphinx-rtd-theme",
91 "tenacity",
92 "adlfs==0.5.9",
93 "firebase-admin==4.5.2",
94 "pre-commit",
95 "assertpy==1.1",
96 "google-cloud-bigquery>=2.0.*",
97 "google-cloud-bigquery-storage >= 2.0.0",
98 "google-cloud-datastore>=2.1.*",
99 "google-cloud-storage>=1.20.*",
100 "google-cloud-core==1.4.*",
101 ]
102
103 # README file from Feast repo root directory
104 repo_root = (
105 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
106 .communicate()[0]
107 .rstrip()
108 .decode("utf-8")
109 )
110 README_FILE = os.path.join(repo_root, "README.md")
111 with open(README_FILE, "r") as f:
112 LONG_DESCRIPTION = f.read()
113
114 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
115 # Regex modified from default tag regex in:
116 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
117 TAG_REGEX = re.compile(
118 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
119 )
120
121
122 class BuildProtoCommand(Command):
123 description = "Builds the proto files into python files."
124
125 def initialize_options(self):
126 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
127 self.proto_folder = os.path.join(repo_root, "protos")
128 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
129 self.sub_folders = ["core", "serving", "types", "storage"]
130
131 def finalize_options(self):
132 pass
133
134 def _generate_protos(self, path):
135 proto_files = glob.glob(os.path.join(self.proto_folder, path))
136
137 subprocess.check_call(self.protoc + [
138 '-I', self.proto_folder,
139 '--python_out', self.this_package,
140 '--grpc_python_out', self.this_package,
141 '--mypy_out', self.this_package] + proto_files)
142
143 def run(self):
144 for sub_folder in self.sub_folders:
145 self._generate_protos(f'feast/{sub_folder}/*.proto')
146
147 from pathlib import Path
148
149 for path in Path('feast/protos').rglob('*.py'):
150 for folder in self.sub_folders:
151 # Read in the file
152 with open(path, 'r') as file:
153 filedata = file.read()
154
155 # Replace the target string
156 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
157
158 # Write the file out again
159 with open(path, 'w') as file:
160 file.write(filedata)
161
162
163 class BuildCommand(build_py):
164 """Custom build command."""
165
166 def run(self):
167 self.run_command('build_proto')
168 build_py.run(self)
169
170
171 class DevelopCommand(develop):
172 """Custom develop command."""
173
174 def run(self):
175 self.run_command('build_proto')
176 develop.run(self)
177
178
179 setup(
180 name=NAME,
181 author=AUTHOR,
182 description=DESCRIPTION,
183 long_description=LONG_DESCRIPTION,
184 long_description_content_type="text/markdown",
185 python_requires=REQUIRES_PYTHON,
186 url=URL,
187 packages=find_packages(exclude=("tests",)),
188 install_requires=REQUIRED,
189 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
190 # Install dev requirements with: pip install -e .[dev]
191 extras_require={
192 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
193 "ci": CI_REQUIRED,
194 "gcp": GCP_REQUIRED,
195 },
196 include_package_data=True,
197 license="Apache",
198 classifiers=[
199 # Trove classifiers
200 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
201 "License :: OSI Approved :: Apache Software License",
202 "Programming Language :: Python",
203 "Programming Language :: Python :: 3",
204 "Programming Language :: Python :: 3.7",
205 ],
206 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
207 use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
208 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools>=1.32.0", "mypy-protobuf", "sphinx"],
209 package_data={
210 "": [
211 "protos/feast/**/*.proto",
212 "protos/feast/third_party/grpc/health/v1/*.proto",
213 "protos/tensorflow_metadata/proto/v0/*.proto",
214 "feast/protos/feast/**/*.py",
215 "tensorflow_metadata/proto/v0/*.py"
216 ],
217 },
218 cmdclass={
219 "build_proto": BuildProtoCommand,
220 "build_py": BuildCommand,
221 "develop": DevelopCommand,
222 },
223 )
224
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -43,15 +43,14 @@
"fastavro>=0.22.11,<0.23",
"google-api-core>=1.23.0",
"googleapis-common-protos==1.52.*",
- "grpcio>=1.32.0",
+ "grpcio>=1.34.0",
"Jinja2>=2.0.0",
"jsonschema",
"mmh3",
- "numpy<1.20.0",
- "pandas~=1.0.0",
+ "pandas>=1.0.0",
"pandavro==1.5.*",
"protobuf>=3.10",
- "pyarrow==2.0.0",
+ "pyarrow>=2.0.0",
"pydantic>=1.0.0",
"PyYAML==5.3.*",
"tabulate==0.8.*",
@@ -72,8 +71,8 @@
"flake8",
"black==19.10b0",
"isort>=5",
- "grpcio-tools>=1.32.0",
- "grpcio-testing>=1.32.0",
+ "grpcio-tools==1.34.0",
+ "grpcio-testing==1.34.0",
"mock==2.0.0",
"moto",
"mypy==0.790",
@@ -205,7 +204,7 @@
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
- setup_requires=["setuptools_scm", "grpcio", "grpcio-tools>=1.32.0", "mypy-protobuf", "sphinx"],
+ setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx"],
package_data={
"": [
"protos/feast/**/*.proto",
|
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -43,15 +43,14 @@\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n- \"grpcio>=1.32.0\",\n+ \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n- \"numpy<1.20.0\",\n- \"pandas~=1.0.0\",\n+ \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n- \"pyarrow==2.0.0\",\n+ \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n@@ -72,8 +71,8 @@\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n- \"grpcio-tools>=1.32.0\",\n- \"grpcio-testing>=1.32.0\",\n+ \"grpcio-tools==1.34.0\",\n+ \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n@@ -205,7 +204,7 @@\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n- setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools>=1.32.0\", \"mypy-protobuf\", \"sphinx\"],\n+ setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n", "issue": "Add cross-environment testing to GitHub Actions\nInstead of just testing `python:3.7`, we should test\r\n* Multiple operating systems\r\n* Multiple versions of Python\r\n\r\nhttps://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idruns-on\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.32.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy<1.20.0\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow==2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n 
\"google-cloud-core==1.4.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools>=1.32.0\",\n \"grpcio-testing>=1.32.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": 
[\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools>=1.32.0\", \"mypy-protobuf\", \"sphinx\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]}
| 3,101 | 519 |
gh_patches_debug_7961
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-9387
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default value for "mode" in "ndimage.shift"
In scipy.ndimage.shift the `mode` parameter's default value is 'constant', but [the documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.shift.html#scipy.ndimage.shift) says the default is 'reflect'.
Which of the two is wrong?
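A quick way to confirm which default the installed SciPy build actually declares (a minimal sketch; it simply prints whatever the local version uses, and the issue reports 'constant'):

```python
import inspect

from scipy import ndimage

# Read the declared default of the `mode` parameter straight from the signature
print(inspect.signature(ndimage.shift).parameters["mode"].default)
```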
</issue>
<code>
[start of scipy/ndimage/_ni_docstrings.py]
1 """Docstring components common to several ndimage functions."""
2 from __future__ import division, print_function, absolute_import
3
4 from scipy.misc import doccer
5
6 __all__ = ['docfiller']
7
8
9 _input_doc = (
10 """input : array_like
11 The input array.""")
12 _axis_doc = (
13 """axis : int, optional
14 The axis of `input` along which to calculate. Default is -1.""")
15 _output_doc = (
16 """output : array or dtype, optional
17 The array in which to place the output, or the dtype of the
18 returned array. By default an array of the same dtype as input
19 will be created.""")
20 _size_foot_doc = (
21 """size : scalar or tuple, optional
22 See footprint, below. Ignored if footprint is given.
23 footprint : array, optional
24 Either `size` or `footprint` must be defined. `size` gives
25 the shape that is taken from the input array, at every element
26 position, to define the input to the filter function.
27 `footprint` is a boolean array that specifies (implicitly) a
28 shape, but also which of the elements within this shape will get
29 passed to the filter function. Thus ``size=(n,m)`` is equivalent
30 to ``footprint=np.ones((n,m))``. We adjust `size` to the number
31 of dimensions of the input array, so that, if the input array is
32 shape (10,10,10), and `size` is 2, then the actual size used is
33 (2,2,2). When `footprint` is given, `size` is ignored.""")
34 _mode_doc = (
35 """mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
36 The `mode` parameter determines how the input array is extended
37 when the filter overlaps a border. Default is 'reflect'. Behavior
38 for each valid value is as follows:
39
40 'reflect' (`d c b a | a b c d | d c b a`)
41 The input is extended by reflecting about the edge of the last
42 pixel.
43
44 'constant' (`k k k k | a b c d | k k k k`)
45 The input is extended by filling all values beyond the edge with
46 the same constant value, defined by the `cval` parameter.
47
48 'nearest' (`a a a a | a b c d | d d d d`)
49 The input is extended by replicating the last pixel.
50
51 'mirror' (`d c b | a b c d | c b a`)
52 The input is extended by reflecting about the center of the last
53 pixel.
54
55 'wrap' (`a b c d | a b c d | a b c d`)
56 The input is extended by wrapping around to the opposite edge.""")
57 _mode_multiple_doc = (
58 """mode : str or sequence, optional
59 The `mode` parameter determines how the input array is extended
60 when the filter overlaps a border. By passing a sequence of modes
61 with length equal to the number of dimensions of the input array,
62 different modes can be specified along each axis. Default value is
63 'reflect'. The valid values and their behavior is as follows:
64
65 'reflect' (`d c b a | a b c d | d c b a`)
66 The input is extended by reflecting about the edge of the last
67 pixel.
68
69 'constant' (`k k k k | a b c d | k k k k`)
70 The input is extended by filling all values beyond the edge with
71 the same constant value, defined by the `cval` parameter.
72
73 'nearest' (`a a a a | a b c d | d d d d`)
74 The input is extended by replicating the last pixel.
75
76 'mirror' (`d c b | a b c d | c b a`)
77 The input is extended by reflecting about the center of the last
78 pixel.
79
80 'wrap' (`a b c d | a b c d | a b c d`)
81 The input is extended by wrapping around to the opposite edge.""")
82 _cval_doc = (
83 """cval : scalar, optional
84 Value to fill past edges of input if `mode` is 'constant'. Default
85 is 0.0.""")
86 _origin_doc = (
87 """origin : int, optional
88 Controls the placement of the filter on the input array's pixels.
89 A value of 0 (the default) centers the filter over the pixel, with
90 positive values shifting the filter to the left, and negative ones
91 to the right.""")
92 _origin_multiple_doc = (
93 """origin : int or sequence, optional
94 Controls the placement of the filter on the input array's pixels.
95 A value of 0 (the default) centers the filter over the pixel, with
96 positive values shifting the filter to the left, and negative ones
97 to the right. By passing a sequence of origins with length equal to
98 the number of dimensions of the input array, different shifts can
99 be specified along each axis.""")
100 _extra_arguments_doc = (
101 """extra_arguments : sequence, optional
102 Sequence of extra positional arguments to pass to passed function.""")
103 _extra_keywords_doc = (
104 """extra_keywords : dict, optional
105 dict of extra keyword arguments to pass to passed function.""")
106 _prefilter_doc = (
107 """prefilter : bool, optional
108 Determines if the input array is prefiltered with `spline_filter`
109 before interpolation. The default is True, which will create a
110 temporary `float64` array of filtered values if `order > 1`. If
111 setting this to False, the output will be slightly blurred if
112 `order > 1`, unless the input is prefiltered, i.e. it is the result
113 of calling `spline_filter` on the original input.""")
114
115 docdict = {
116 'input': _input_doc,
117 'axis': _axis_doc,
118 'output': _output_doc,
119 'size_foot': _size_foot_doc,
120 'mode': _mode_doc,
121 'mode_multiple': _mode_multiple_doc,
122 'cval': _cval_doc,
123 'origin': _origin_doc,
124 'origin_multiple': _origin_multiple_doc,
125 'extra_arguments': _extra_arguments_doc,
126 'extra_keywords': _extra_keywords_doc,
127 'prefilter': _prefilter_doc
128 }
129
130 docfiller = doccer.filldoc(docdict)
131
[end of scipy/ndimage/_ni_docstrings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scipy/ndimage/_ni_docstrings.py b/scipy/ndimage/_ni_docstrings.py
--- a/scipy/ndimage/_ni_docstrings.py
+++ b/scipy/ndimage/_ni_docstrings.py
@@ -34,7 +34,7 @@
_mode_doc = (
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the input array is extended
- when the filter overlaps a border. Default is 'reflect'. Behavior
+ when the filter overlaps a border. Default is 'constant'. Behavior
for each valid value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
|
{"golden_diff": "diff --git a/scipy/ndimage/_ni_docstrings.py b/scipy/ndimage/_ni_docstrings.py\n--- a/scipy/ndimage/_ni_docstrings.py\n+++ b/scipy/ndimage/_ni_docstrings.py\n@@ -34,7 +34,7 @@\n _mode_doc = (\n \"\"\"mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the input array is extended\n- when the filter overlaps a border. Default is 'reflect'. Behavior\n+ when the filter overlaps a border. Default is 'constant'. Behavior\n for each valid value is as follows:\n \n 'reflect' (`d c b a | a b c d | d c b a`)\n", "issue": "Default value for \"mode\" in \"ndimage.shift\"\nIn scipy.ndimage.shift the mode's default value is 'constant', but [in the documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.shift.html#scipy.ndimage.shift) it says Default is 'reflect'.\r\nWhich of the two is wrong?\n", "before_files": [{"content": "\"\"\"Docstring components common to several ndimage functions.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom scipy.misc import doccer\n\n__all__ = ['docfiller']\n\n\n_input_doc = (\n\"\"\"input : array_like\n The input array.\"\"\")\n_axis_doc = (\n\"\"\"axis : int, optional\n The axis of `input` along which to calculate. Default is -1.\"\"\")\n_output_doc = (\n\"\"\"output : array or dtype, optional\n The array in which to place the output, or the dtype of the\n returned array. By default an array of the same dtype as input\n will be created.\"\"\")\n_size_foot_doc = (\n\"\"\"size : scalar or tuple, optional\n See footprint, below. Ignored if footprint is given.\nfootprint : array, optional\n Either `size` or `footprint` must be defined. `size` gives\n the shape that is taken from the input array, at every element\n position, to define the input to the filter function.\n `footprint` is a boolean array that specifies (implicitly) a\n shape, but also which of the elements within this shape will get\n passed to the filter function. Thus ``size=(n,m)`` is equivalent\n to ``footprint=np.ones((n,m))``. We adjust `size` to the number\n of dimensions of the input array, so that, if the input array is\n shape (10,10,10), and `size` is 2, then the actual size used is\n (2,2,2). When `footprint` is given, `size` is ignored.\"\"\")\n_mode_doc = (\n\"\"\"mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the input array is extended\n when the filter overlaps a border. Default is 'reflect'. Behavior\n for each valid value is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_mode_multiple_doc = (\n\"\"\"mode : str or sequence, optional\n The `mode` parameter determines how the input array is extended\n when the filter overlaps a border. By passing a sequence of modes\n with length equal to the number of dimensions of the input array,\n different modes can be specified along each axis. Default value is\n 'reflect'. 
The valid values and their behavior is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_cval_doc = (\n\"\"\"cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0.\"\"\")\n_origin_doc = (\n\"\"\"origin : int, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right.\"\"\")\n_origin_multiple_doc = (\n\"\"\"origin : int or sequence, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right. By passing a sequence of origins with length equal to\n the number of dimensions of the input array, different shifts can\n be specified along each axis.\"\"\")\n_extra_arguments_doc = (\n\"\"\"extra_arguments : sequence, optional\n Sequence of extra positional arguments to pass to passed function.\"\"\")\n_extra_keywords_doc = (\n\"\"\"extra_keywords : dict, optional\n dict of extra keyword arguments to pass to passed function.\"\"\")\n_prefilter_doc = (\n\"\"\"prefilter : bool, optional\n Determines if the input array is prefiltered with `spline_filter`\n before interpolation. The default is True, which will create a\n temporary `float64` array of filtered values if `order > 1`. If\n setting this to False, the output will be slightly blurred if\n `order > 1`, unless the input is prefiltered, i.e. it is the result\n of calling `spline_filter` on the original input.\"\"\")\n\ndocdict = {\n 'input': _input_doc,\n 'axis': _axis_doc,\n 'output': _output_doc,\n 'size_foot': _size_foot_doc,\n 'mode': _mode_doc,\n 'mode_multiple': _mode_multiple_doc,\n 'cval': _cval_doc,\n 'origin': _origin_doc,\n 'origin_multiple': _origin_multiple_doc,\n 'extra_arguments': _extra_arguments_doc,\n 'extra_keywords': _extra_keywords_doc,\n 'prefilter': _prefilter_doc\n }\n\ndocfiller = doccer.filldoc(docdict)\n", "path": "scipy/ndimage/_ni_docstrings.py"}]}
| 2,296 | 165 |
gh_patches_debug_43301
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-7148
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Decorating RoutablePageMixin routes with "route_path" instead of "route" (path vs re_path)
### Is your proposal related to a problem?
In migrating a Django application to a Wagtail app, I've been using RoutablePageMixin in some cases and wanting to use the newer Django django.urls.path patterns instead of the re_path to make copying the url patterns from the original application's urls.py a bit easier.
I've mentally switched away from the regular expressions for url matching and am struggling going back to using the regex's.
### Describe the solution you'd like
I'd like to decorate the route with something like `@route_path` or have `route()` in `routable_page/models.py` have a check to see if the pattern is a regular expression or not and (RegexPattern or RoutePattern).
### Describe alternatives you've considered
For now I've created a custom `route_path` function in `/custom/routable_pages/models.py` that appends a `path` to _routablepage_routes instead of `re_path` (as in stock). It looks like:
/custom/routable_page/models.py
```python
from django.urls import path, re_path
_custom_creation_counter = 0
def route_path(pattern, name=None):
"""
Adds a path more like the Django 2.0+ Url Dispatcher rules
"""
def decorator(view_func):
global _custom_creation_counter
_custom_creation_counter += 1
# Make sure page has _routablepage_routes attribute
if not hasattr(view_func, '_routablepage_routes'):
view_func._routablepage_routes = []
view_func._routablepage_routes.append((
path(pattern, view_func, name=(name or view_func.__name__)),
_custom_creation_counter,
))
return view_func
return decorator
# a copy of the stock route follows right below, included in this location for my own sanity in the global scope
```
and decorate the routable functions with `@route_path`, which seems to be working so far, but I'm just getting into it and wondering if I'm going to get burned doing things this way.
/subscribers/models.py
```python
from myprojectname.custom.routable_page.models import route_path
class SubscribePage(RoutablePageMixin, Page):
....
@route_path("subscribe/")
def subscribe(self, request, *args, **kwargs):
"""
Confirm the form is submitted correctly, and conditionally add
the subscriber to the list
"""
from subscribers.forms import SubscribeForm
if request.method == 'POST':
form = SubscribeForm(request.POST)
if form.is_valid():
form_submission = form.save(commit=False)
....
```
### Additional context
Obviously, the 'try it and see' approach applies here, but wondering if there is a deeper design decision in Wagtail which would suggest to not take this direction.
</issue>
<code>
[start of wagtail/contrib/routable_page/models.py]
1 from django.http import Http404
2 from django.template.response import TemplateResponse
3 from django.urls import URLResolver, re_path
4 from django.urls.resolvers import RegexPattern
5
6 from wagtail.models import Page
7 from wagtail.url_routing import RouteResult
8
9 _creation_counter = 0
10
11
12 def route(pattern, name=None):
13 def decorator(view_func):
14 global _creation_counter
15 _creation_counter += 1
16
17 # Make sure page has _routablepage_routes attribute
18 if not hasattr(view_func, "_routablepage_routes"):
19 view_func._routablepage_routes = []
20
21 # Add new route to view
22 view_func._routablepage_routes.append(
23 (
24 re_path(pattern, view_func, name=(name or view_func.__name__)),
25 _creation_counter,
26 )
27 )
28
29 return view_func
30
31 return decorator
32
33
34 class RoutablePageMixin:
35 """
36 This class can be mixed in to a Page model, allowing extra routes to be
37 added to it.
38 """
39
40 @route(r"^$")
41 def index_route(self, request, *args, **kwargs):
42 request.is_preview = getattr(request, "is_preview", False)
43
44 return TemplateResponse(
45 request,
46 self.get_template(request, *args, **kwargs),
47 self.get_context(request, *args, **kwargs),
48 )
49
50 @classmethod
51 def get_subpage_urls(cls):
52 routes = []
53
54 # Loop over this class's defined routes, in method resolution order.
55 # Routes defined in the immediate class take precedence, followed by
56 # immediate superclass and so on
57 for klass in cls.__mro__:
58 routes_for_class = []
59 for val in klass.__dict__.values():
60 if hasattr(val, "_routablepage_routes"):
61 routes_for_class.extend(val._routablepage_routes)
62
63 # sort routes by _creation_counter so that ones earlier in the class definition
64 # take precedence
65 routes_for_class.sort(key=lambda route: route[1])
66
67 routes.extend(route[0] for route in routes_for_class)
68
69 return tuple(routes)
70
71 @classmethod
72 def get_resolver(cls):
73 if "_routablepage_urlresolver" not in cls.__dict__:
74 subpage_urls = cls.get_subpage_urls()
75 cls._routablepage_urlresolver = URLResolver(
76 RegexPattern(r"^/"), subpage_urls
77 )
78
79 return cls._routablepage_urlresolver
80
81 def reverse_subpage(self, name, args=None, kwargs=None):
82 """
83 This method takes a route name/arguments and returns a URL path.
84 """
85 args = args or []
86 kwargs = kwargs or {}
87
88 return self.get_resolver().reverse(name, *args, **kwargs)
89
90 def resolve_subpage(self, path):
91 """
92 This method takes a URL path and finds the view to call.
93 """
94 view, args, kwargs = self.get_resolver().resolve(path)
95
96 # Bind the method
97 view = view.__get__(self, type(self))
98
99 return view, args, kwargs
100
101 def route(self, request, path_components):
102 """
103 This hooks the subpage URLs into Wagtail's routing.
104 """
105 if self.live:
106 try:
107 path = "/"
108 if path_components:
109 path += "/".join(path_components) + "/"
110
111 view, args, kwargs = self.resolve_subpage(path)
112 return RouteResult(self, args=(view, args, kwargs))
113 except Http404:
114 pass
115
116 return super().route(request, path_components)
117
118 def serve(self, request, view=None, args=None, kwargs=None):
119 if args is None:
120 args = []
121 if kwargs is None:
122 kwargs = {}
123 if view is None:
124 return super().serve(request, *args, **kwargs)
125 return view(request, *args, **kwargs)
126
127 def render(self, request, *args, template=None, context_overrides=None, **kwargs):
128 """
129 This method replicates what ``Page.serve()`` usually does when ``RoutablePageMixin``
130 is not used. By default, ``Page.get_template()`` is called to derive the template
131 to use for rendering, and ``Page.get_context()`` is always called to gather the
132 data to be included in the context.
133
134 You can use the ``context_overrides`` keyword argument as a shortcut to override or
135 add new values to the context. For example:
136
137 .. code-block:: python
138
139 @route(r'^$') # override the default route
140 def upcoming_events(self, request):
141 return self.render(request, context_overrides={
142 'title': "Current events",
143 'events': EventPage.objects.live().future(),
144 })
145
146 You can also use the ``template`` argument to specify an alternative
147 template to use for rendering. For example:
148
149 .. code-block:: python
150
151 @route(r'^past/$')
152 def past_events(self, request):
153 return self.render(
154 request,
155 context_overrides={
156 'title': "Past events",
157 'events': EventPage.objects.live().past(),
158 },
159 template="events/event_index_historical.html",
160 )
161 """
162 if template is None:
163 template = self.get_template(request, *args, **kwargs)
164 context = self.get_context(request, *args, **kwargs)
165 context.update(context_overrides or {})
166 return TemplateResponse(request, template, context)
167
168 def serve_preview(self, request, mode_name):
169 view, args, kwargs = self.resolve_subpage("/")
170 return view(request, *args, **kwargs)
171
172
173 class RoutablePage(RoutablePageMixin, Page):
174 """
175 This class extends Page by adding methods which allows extra routes to be
176 added to it.
177 """
178
179 class Meta:
180 abstract = True
181
[end of wagtail/contrib/routable_page/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/contrib/routable_page/models.py b/wagtail/contrib/routable_page/models.py
--- a/wagtail/contrib/routable_page/models.py
+++ b/wagtail/contrib/routable_page/models.py
@@ -1,15 +1,23 @@
+import logging
+from functools import partial
+
+from django.core.checks import Warning
from django.http import Http404
from django.template.response import TemplateResponse
-from django.urls import URLResolver, re_path
-from django.urls.resolvers import RegexPattern
+from django.urls import URLResolver
+from django.urls import path as path_func
+from django.urls import re_path as re_path_func
+from django.urls.resolvers import RegexPattern, RoutePattern
from wagtail.models import Page
from wagtail.url_routing import RouteResult
_creation_counter = 0
+logger = logging.getLogger("wagtail.routablepage")
+
-def route(pattern, name=None):
+def _path(pattern, name=None, func=None):
def decorator(view_func):
global _creation_counter
_creation_counter += 1
@@ -21,7 +29,7 @@
# Add new route to view
view_func._routablepage_routes.append(
(
- re_path(pattern, view_func, name=(name or view_func.__name__)),
+ func(pattern, view_func, name=(name or view_func.__name__)),
_creation_counter,
)
)
@@ -31,13 +39,20 @@
return decorator
+re_path = partial(_path, func=re_path_func)
+path = partial(_path, func=path_func)
+
+# Make route an alias of re_path for backwards compatibility.
+route = re_path
+
+
class RoutablePageMixin:
"""
This class can be mixed in to a Page model, allowing extra routes to be
added to it.
"""
- @route(r"^$")
+ @path("")
def index_route(self, request, *args, **kwargs):
request.is_preview = getattr(request, "is_preview", False)
@@ -78,6 +93,37 @@
return cls._routablepage_urlresolver
+ @classmethod
+ def check(cls, **kwargs):
+ errors = super().check(**kwargs)
+ errors.extend(cls._check_path_with_regex())
+ return errors
+
+ @classmethod
+ def _check_path_with_regex(cls):
+ routes = cls.get_subpage_urls()
+ errors = []
+ for route in routes:
+ if isinstance(route.pattern, RoutePattern):
+ pattern = route.pattern._route
+ if (
+ "(?P<" in pattern
+ or pattern.startswith("^")
+ or pattern.endswith("$")
+ ):
+ errors.append(
+ Warning(
+ (
+ f"Your URL pattern {route.name or route.callback.__name__} has a "
+ "route that contains '(?P<', begins with a '^', or ends with a '$'."
+ ),
+ hint="Decorate your view with re_path if you want to use regexp.",
+ obj=cls,
+ id="wagtailroutablepage.W001",
+ )
+ )
+ return errors
+
def reverse_subpage(self, name, args=None, kwargs=None):
"""
This method takes a route name/arguments and returns a URL path.
@@ -136,7 +182,7 @@
.. code-block:: python
- @route(r'^$') # override the default route
+ @path('') # override the default route
def upcoming_events(self, request):
return self.render(request, context_overrides={
'title': "Current events",
@@ -148,7 +194,7 @@
.. code-block:: python
- @route(r'^past/$')
+ @path('past/')
def past_events(self, request):
return self.render(
request,
|
{"golden_diff": "diff --git a/wagtail/contrib/routable_page/models.py b/wagtail/contrib/routable_page/models.py\n--- a/wagtail/contrib/routable_page/models.py\n+++ b/wagtail/contrib/routable_page/models.py\n@@ -1,15 +1,23 @@\n+import logging\n+from functools import partial\n+\n+from django.core.checks import Warning\n from django.http import Http404\n from django.template.response import TemplateResponse\n-from django.urls import URLResolver, re_path\n-from django.urls.resolvers import RegexPattern\n+from django.urls import URLResolver\n+from django.urls import path as path_func\n+from django.urls import re_path as re_path_func\n+from django.urls.resolvers import RegexPattern, RoutePattern\n \n from wagtail.models import Page\n from wagtail.url_routing import RouteResult\n \n _creation_counter = 0\n \n+logger = logging.getLogger(\"wagtail.routablepage\")\n+\n \n-def route(pattern, name=None):\n+def _path(pattern, name=None, func=None):\n def decorator(view_func):\n global _creation_counter\n _creation_counter += 1\n@@ -21,7 +29,7 @@\n # Add new route to view\n view_func._routablepage_routes.append(\n (\n- re_path(pattern, view_func, name=(name or view_func.__name__)),\n+ func(pattern, view_func, name=(name or view_func.__name__)),\n _creation_counter,\n )\n )\n@@ -31,13 +39,20 @@\n return decorator\n \n \n+re_path = partial(_path, func=re_path_func)\n+path = partial(_path, func=path_func)\n+\n+# Make route an alias of re_path for backwards compatibility.\n+route = re_path\n+\n+\n class RoutablePageMixin:\n \"\"\"\n This class can be mixed in to a Page model, allowing extra routes to be\n added to it.\n \"\"\"\n \n- @route(r\"^$\")\n+ @path(\"\")\n def index_route(self, request, *args, **kwargs):\n request.is_preview = getattr(request, \"is_preview\", False)\n \n@@ -78,6 +93,37 @@\n \n return cls._routablepage_urlresolver\n \n+ @classmethod\n+ def check(cls, **kwargs):\n+ errors = super().check(**kwargs)\n+ errors.extend(cls._check_path_with_regex())\n+ return errors\n+\n+ @classmethod\n+ def _check_path_with_regex(cls):\n+ routes = cls.get_subpage_urls()\n+ errors = []\n+ for route in routes:\n+ if isinstance(route.pattern, RoutePattern):\n+ pattern = route.pattern._route\n+ if (\n+ \"(?P<\" in pattern\n+ or pattern.startswith(\"^\")\n+ or pattern.endswith(\"$\")\n+ ):\n+ errors.append(\n+ Warning(\n+ (\n+ f\"Your URL pattern {route.name or route.callback.__name__} has a \"\n+ \"route that contains '(?P<', begins with a '^', or ends with a '$'.\"\n+ ),\n+ hint=\"Decorate your view with re_path if you want to use regexp.\",\n+ obj=cls,\n+ id=\"wagtailroutablepage.W001\",\n+ )\n+ )\n+ return errors\n+\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method takes a route name/arguments and returns a URL path.\n@@ -136,7 +182,7 @@\n \n .. code-block:: python\n \n- @route(r'^$') # override the default route\n+ @path('') # override the default route\n def upcoming_events(self, request):\n return self.render(request, context_overrides={\n 'title': \"Current events\",\n@@ -148,7 +194,7 @@\n \n .. 
code-block:: python\n \n- @route(r'^past/$')\n+ @path('past/')\n def past_events(self, request):\n return self.render(\n request,\n", "issue": "Decorating RoutablePageMixin routes with \"route_path\" instead of \"route\" (path vs re_path)\n### Is your proposal related to a problem?\r\n\r\nIn migrating a Django application to a Wagtail app, I've been using RoutablePageMixin in some cases and wanting to use the newer Django django.urls.path patterns instead of the re_path to make copying the url patterns from the original application's urls.py a bit easier.\r\n\r\nI've mentally switched away from the regular expressions for url matching and am struggling going back to using the regex's.\r\n\r\n### Describe the solution you'd like\r\n\r\nI'd like to decorate the route with something like `@route_path` or have `route()` in `routable_page/models.py` have a check to see if the pattern is a regular expression or not and (RegexPattern or RoutePattern). \r\n\r\n### Describe alternatives you've considered\r\nFor now I've created a custom `route_path` function in `/custom/routable_pages/models.py` that appends a `path` to _routablepage_routes instead of `re_path` (as in stock). It looks like:\r\n\r\n/custom/routable_page/models.py\r\n```python\r\nfrom django.urls import path, re_path\r\n\r\n_custom_creation_counter = 0\r\n\r\ndef route_path(pattern, name=None):\r\n \"\"\"\r\n Adds a path more like the Django 2.0+ Url Dispatcher rules\r\n \"\"\"\r\n def decorator(view_func):\r\n global _custom_creation_counter\r\n _custom_creation_counter += 1\r\n # Make sure page has _routablepage_routes attribute\r\n if not hasattr(view_func, '_routablepage_routes'):\r\n view_func._routablepage_routes = []\r\n\r\n view_func._routablepage_routes.append((\r\n path(pattern, view_func, name=(name or view_func.__name__)),\r\n _custom_creation_counter,\r\n ))\r\n return view_func\r\n return decorator\r\n\r\n # a copy of the stock route follows right below, included in this location for my own sanity in the global scope\r\n``` \r\n\r\nand decorate the routable functions with `@route_path` which seems to be working so far, but just getting into it and wondering if I'm going to get burned doing things this way.\r\n\r\n/subscribers/models.py\r\n```python\r\nfrom myprojectname.custom.routable_page.models import route_path\r\n\r\nclass SubscribePage(RoutablePageMixin, Page):\r\n\r\n ....\r\n\r\n @route_path(\"subscribe/\")\r\n def subscribe(self, request, *args, **kwargs):\r\n \"\"\"\r\n Confirm the form is submitted correctly, and conditionally add\r\n the subscriber to the list\r\n \"\"\"\r\n from subscribers.forms import SubscribeForm\r\n if request.method == 'POST':\r\n form = SubscribeForm(request.POST)\r\n if form.is_valid():\r\n form_submission = form.save(commit=False)\r\n ....\r\n```\r\n\r\n\r\n### Additional context\r\nObviously, the 'try it and see' approach applies here, but wondering if there is a deeper design decision in Wagtail which would suggest to not take this direction.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.template.response import TemplateResponse\nfrom django.urls import URLResolver, re_path\nfrom django.urls.resolvers import RegexPattern\n\nfrom wagtail.models import Page\nfrom wagtail.url_routing import RouteResult\n\n_creation_counter = 0\n\n\ndef route(pattern, name=None):\n def decorator(view_func):\n global _creation_counter\n _creation_counter += 1\n\n # Make sure page has _routablepage_routes attribute\n if not hasattr(view_func, 
\"_routablepage_routes\"):\n view_func._routablepage_routes = []\n\n # Add new route to view\n view_func._routablepage_routes.append(\n (\n re_path(pattern, view_func, name=(name or view_func.__name__)),\n _creation_counter,\n )\n )\n\n return view_func\n\n return decorator\n\n\nclass RoutablePageMixin:\n \"\"\"\n This class can be mixed in to a Page model, allowing extra routes to be\n added to it.\n \"\"\"\n\n @route(r\"^$\")\n def index_route(self, request, *args, **kwargs):\n request.is_preview = getattr(request, \"is_preview\", False)\n\n return TemplateResponse(\n request,\n self.get_template(request, *args, **kwargs),\n self.get_context(request, *args, **kwargs),\n )\n\n @classmethod\n def get_subpage_urls(cls):\n routes = []\n\n # Loop over this class's defined routes, in method resolution order.\n # Routes defined in the immediate class take precedence, followed by\n # immediate superclass and so on\n for klass in cls.__mro__:\n routes_for_class = []\n for val in klass.__dict__.values():\n if hasattr(val, \"_routablepage_routes\"):\n routes_for_class.extend(val._routablepage_routes)\n\n # sort routes by _creation_counter so that ones earlier in the class definition\n # take precedence\n routes_for_class.sort(key=lambda route: route[1])\n\n routes.extend(route[0] for route in routes_for_class)\n\n return tuple(routes)\n\n @classmethod\n def get_resolver(cls):\n if \"_routablepage_urlresolver\" not in cls.__dict__:\n subpage_urls = cls.get_subpage_urls()\n cls._routablepage_urlresolver = URLResolver(\n RegexPattern(r\"^/\"), subpage_urls\n )\n\n return cls._routablepage_urlresolver\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method takes a route name/arguments and returns a URL path.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n return self.get_resolver().reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This method takes a URL path and finds the view to call.\n \"\"\"\n view, args, kwargs = self.get_resolver().resolve(path)\n\n # Bind the method\n view = view.__get__(self, type(self))\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage URLs into Wagtail's routing.\n \"\"\"\n if self.live:\n try:\n path = \"/\"\n if path_components:\n path += \"/\".join(path_components) + \"/\"\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super().route(request, path_components)\n\n def serve(self, request, view=None, args=None, kwargs=None):\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n if view is None:\n return super().serve(request, *args, **kwargs)\n return view(request, *args, **kwargs)\n\n def render(self, request, *args, template=None, context_overrides=None, **kwargs):\n \"\"\"\n This method replicates what ``Page.serve()`` usually does when ``RoutablePageMixin``\n is not used. By default, ``Page.get_template()`` is called to derive the template\n to use for rendering, and ``Page.get_context()`` is always called to gather the\n data to be included in the context.\n\n You can use the ``context_overrides`` keyword argument as a shortcut to override or\n add new values to the context. For example:\n\n .. 
code-block:: python\n\n @route(r'^$') # override the default route\n def upcoming_events(self, request):\n return self.render(request, context_overrides={\n 'title': \"Current events\",\n 'events': EventPage.objects.live().future(),\n })\n\n You can also use the ``template`` argument to specify an alternative\n template to use for rendering. For example:\n\n .. code-block:: python\n\n @route(r'^past/$')\n def past_events(self, request):\n return self.render(\n request,\n context_overrides={\n 'title': \"Past events\",\n 'events': EventPage.objects.live().past(),\n },\n template=\"events/event_index_historical.html\",\n )\n \"\"\"\n if template is None:\n template = self.get_template(request, *args, **kwargs)\n context = self.get_context(request, *args, **kwargs)\n context.update(context_overrides or {})\n return TemplateResponse(request, template, context)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage(\"/\")\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods which allows extra routes to be\n added to it.\n \"\"\"\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/routable_page/models.py"}]}
| 2,864 | 885 |
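In the wagtail record above, the golden diff adds a `path()` decorator next to the regex-based one and keeps `route` as an alias of `re_path`, so existing pages keep working. Below is a minimal usage sketch based on that diff; the `EventIndexPage` model, its route names, and the context keys are illustrative and not part of the repository.

```python
# Sketch of the decorators introduced by the diff above; EventIndexPage is hypothetical.
from wagtail.contrib.routable_page.models import RoutablePageMixin, path, re_path
from wagtail.models import Page


class EventIndexPage(RoutablePageMixin, Page):
    @path("")  # overrides the default index route, as in the updated docstring
    def current_events(self, request):
        return self.render(request, context_overrides={"title": "Current events"})

    @path("year/<int:year>/")  # Django path() converters instead of a regex
    def events_for_year(self, request, year):
        return self.render(request, context_overrides={"year": year})

    @re_path(r"^tag/(?P<tag>[\w-]+)/$")  # regex routes stay available via re_path
    def events_by_tag(self, request, tag):
        return self.render(request, context_overrides={"tag": tag})
```

Routes declared with `path()` that still contain `(?P<`, a leading `^`, or a trailing `$` trip the new `wagtailroutablepage.W001` system check added in the same diff.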
gh_patches_debug_1091
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5926
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.mangomolo: error: No plugin can handle URL
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
[cli][info] Your Streamlink version (6.7.2) is up to date!
### Description
Unable to get stream for Kuwaiti channels.. error message: "error: No plugin can handle URL:"
sample URLs:
https://www.media.gov.kw/LiveTV.aspx
https://www.media.gov.kw/LiveTV.aspx?PanChannel=Drama
### Debug log
```text
user@desktop:~ $ streamlink https://www.media.gov.kw/LiveTV.aspx --loglevel=debug
[cli][debug] OS: Linux-6.1.21+-armv6l-with-glibc2.31
[cli][debug] Python: 3.9.2
[cli][debug] OpenSSL: OpenSSL 1.1.1w 11 Sep 2023
[cli][debug] Streamlink: 6.7.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.7.22
[cli][debug] exceptiongroup: 1.1.3
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.18.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.10.3
[cli][debug] typing-extensions: 4.7.1
[cli][debug] urllib3: 2.0.4
[cli][debug] websocket-client: 1.6.2
[cli][debug] Arguments:
[cli][debug] url=https://www.media.gov.kw/LiveTV.aspx
[cli][debug] --loglevel=debug
error: No plugin can handle URL: https://www.media.gov.kw/LiveTV.aspx
```
</issue>
<code>
[start of src/streamlink/plugins/mangomolo.py]
1 """
2 $description OTT video platform owned by Alpha Technology Group
3 $url player.mangomolo.com
4 $url media.gov.kw
5 $type live
6 """
7
8 import logging
9 import re
10
11 from streamlink.exceptions import NoStreamsError
12 from streamlink.plugin import Plugin, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream import HLSStream
15 from streamlink.utils.url import update_scheme
16
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(
22 name="mangomoloplayer",
23 pattern=re.compile(r"https?://player\.mangomolo\.com/v1/"),
24 )
25 @pluginmatcher(
26 name="mediagovkw",
27 pattern=re.compile(r"https?://media\.gov\.kw/"),
28 )
29 class Mangomolo(Plugin):
30 def _get_player_url(self):
31 player_url = self.session.http.get(self.url, schema=validate.Schema(
32 validate.parse_html(),
33 validate.xml_xpath_string(".//iframe[contains(@src,'//player.mangomolo.com/v1/')][1]/@src"),
34 ))
35 if not player_url:
36 log.error("Could not find embedded player")
37 raise NoStreamsError
38
39 self.url = update_scheme("https://", player_url)
40
41 def _get_streams(self):
42 headers = {}
43 if not self.matches["mangomoloplayer"]:
44 headers["Referer"] = self.url
45 self._get_player_url()
46
47 hls_url = self.session.http.get(self.url, headers=headers, schema=validate.Schema(
48 re.compile(r"src\s*:\s*(?P<q>[\"'])(?P<url>https?://\S+?\.m3u8\S*?)(?P=q)"),
49 validate.none_or_all(validate.get("url")),
50 ))
51 if hls_url:
52 return HLSStream.parse_variant_playlist(self.session, hls_url)
53
54
55 __plugin__ = Mangomolo
56
[end of src/streamlink/plugins/mangomolo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/mangomolo.py b/src/streamlink/plugins/mangomolo.py
--- a/src/streamlink/plugins/mangomolo.py
+++ b/src/streamlink/plugins/mangomolo.py
@@ -24,7 +24,7 @@
)
@pluginmatcher(
name="mediagovkw",
- pattern=re.compile(r"https?://media\.gov\.kw/"),
+ pattern=re.compile(r"https?://(www\.)?media\.gov\.kw/"),
)
class Mangomolo(Plugin):
def _get_player_url(self):
|
{"golden_diff": "diff --git a/src/streamlink/plugins/mangomolo.py b/src/streamlink/plugins/mangomolo.py\n--- a/src/streamlink/plugins/mangomolo.py\n+++ b/src/streamlink/plugins/mangomolo.py\n@@ -24,7 +24,7 @@\n )\n @pluginmatcher(\n name=\"mediagovkw\",\n- pattern=re.compile(r\"https?://media\\.gov\\.kw/\"),\n+ pattern=re.compile(r\"https?://(www\\.)?media\\.gov\\.kw/\"),\n )\n class Mangomolo(Plugin):\n def _get_player_url(self):\n", "issue": "plugins.mangomolo: error: No plugin can handle URL\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\n[cli][info] Your Streamlink version (6.7.2) is up to date!\n\n### Description\n\nUnable to get stream for Kuwaiti channels.. error message: \"error: No plugin can handle URL:\"\r\nsample URLs:\r\n https://www.media.gov.kw/LiveTV.aspx\r\n https://www.media.gov.kw/LiveTV.aspx?PanChannel=Drama\r\n\n\n### Debug log\n\n```text\nuser@desktop:~ $ streamlink https://www.media.gov.kw/LiveTV.aspx --loglevel=debug\r\n[cli][debug] OS: Linux-6.1.21+-armv6l-with-glibc2.31\r\n[cli][debug] Python: 3.9.2\r\n[cli][debug] OpenSSL: OpenSSL 1.1.1w 11 Sep 2023\r\n[cli][debug] Streamlink: 6.7.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.7.22\r\n[cli][debug] exceptiongroup: 1.1.3\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.18.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.3\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 2.0.4\r\n[cli][debug] websocket-client: 1.6.2\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.media.gov.kw/LiveTV.aspx\r\n[cli][debug] --loglevel=debug\r\nerror: No plugin can handle URL: https://www.media.gov.kw/LiveTV.aspx\n```\n\n", "before_files": [{"content": "\"\"\"\n$description OTT video platform owned by Alpha Technology Group\n$url player.mangomolo.com\n$url media.gov.kw\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.exceptions import NoStreamsError\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(\n name=\"mangomoloplayer\",\n pattern=re.compile(r\"https?://player\\.mangomolo\\.com/v1/\"),\n)\n@pluginmatcher(\n name=\"mediagovkw\",\n pattern=re.compile(r\"https?://media\\.gov\\.kw/\"),\n)\nclass Mangomolo(Plugin):\n def _get_player_url(self):\n player_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//iframe[contains(@src,'//player.mangomolo.com/v1/')][1]/@src\"),\n ))\n if not player_url:\n log.error(\"Could not find embedded player\")\n raise NoStreamsError\n\n self.url = update_scheme(\"https://\", player_url)\n\n def _get_streams(self):\n headers = {}\n if not 
self.matches[\"mangomoloplayer\"]:\n headers[\"Referer\"] = self.url\n self._get_player_url()\n\n hls_url = self.session.http.get(self.url, headers=headers, schema=validate.Schema(\n re.compile(r\"src\\s*:\\s*(?P<q>[\\\"'])(?P<url>https?://\\S+?\\.m3u8\\S*?)(?P=q)\"),\n validate.none_or_all(validate.get(\"url\")),\n ))\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = Mangomolo\n", "path": "src/streamlink/plugins/mangomolo.py"}]}
| 1,717 | 126 |
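The whole streamlink fix above is one optional group in the plugin matcher. A standalone check with the standard library `re` module, using the URL from the issue, shows why the original pattern never matched; this is just the regex, not Streamlink's plugin-resolution machinery.

```python
import re

url = "https://www.media.gov.kw/LiveTV.aspx"  # URL from the issue report

old_pattern = re.compile(r"https?://media\.gov\.kw/")          # before the diff
new_pattern = re.compile(r"https?://(www\.)?media\.gov\.kw/")  # after the diff

print(bool(old_pattern.match(url)))  # False -> "No plugin can handle URL"
print(bool(new_pattern.match(url)))  # True  -> the mediagovkw matcher now applies
```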
gh_patches_debug_24491
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-6
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bootstrap database with default data for the /safe-apps endpoint
When the database is created and the service is running, we should offer the option/utility to bootstrap it with the default list of safe-apps.
Default database should contain the following data: https://github.com/gnosis/safe-apps-list/blob/development/public/gnosis-default.applist.json
</issue>
<code>
[start of src/safe_apps/models.py]
1 from django.contrib.postgres.fields import ArrayField
2 from django.db import models
3
4
5 class SafeApp(models.Model):
6 url = models.URLField()
7 name = models.CharField(max_length=200)
8 icon_url = models.URLField()
9 description = models.CharField(max_length=200)
10 networks = ArrayField(models.IntegerField())
11
[end of src/safe_apps/models.py]
[start of src/safe_apps/migrations/0001_initial.py]
1 # Generated by Django 3.2 on 2021-04-16 14:52
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 initial = True
10
11 dependencies = [
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='SafeApp',
17 fields=[
18 ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
19 ('url', models.URLField()),
20 ('name', models.CharField(max_length=200)),
21 ('icon_url', models.URLField()),
22 ('description', models.CharField(max_length=200)),
23 ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
24 ],
25 ),
26 ]
27
[end of src/safe_apps/migrations/0001_initial.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py
--- a/src/safe_apps/migrations/0001_initial.py
+++ b/src/safe_apps/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 3.2 on 2021-04-16 14:52
+# Generated by Django 3.2 on 2021-04-28 13:39
import django.contrib.postgres.fields
from django.db import migrations, models
@@ -15,8 +15,7 @@
migrations.CreateModel(
name='SafeApp',
fields=[
- ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('url', models.URLField()),
+ ('url', models.URLField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('icon_url', models.URLField()),
('description', models.CharField(max_length=200)),
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -3,7 +3,7 @@
class SafeApp(models.Model):
- url = models.URLField()
+ url = models.URLField(primary_key=True)
name = models.CharField(max_length=200)
icon_url = models.URLField()
description = models.CharField(max_length=200)
|
{"golden_diff": "diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py\n--- a/src/safe_apps/migrations/0001_initial.py\n+++ b/src/safe_apps/migrations/0001_initial.py\n@@ -1,4 +1,4 @@\n-# Generated by Django 3.2 on 2021-04-16 14:52\n+# Generated by Django 3.2 on 2021-04-28 13:39\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n@@ -15,8 +15,7 @@\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n- ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n- ('url', models.URLField()),\n+ ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\ndiff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -3,7 +3,7 @@\n \n \n class SafeApp(models.Model):\n- url = models.URLField()\n+ url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n", "issue": "Bootstrap database with default data for the /safe-apps endpoint \nWhen the database is created and the service is running, we should offer the option/utility to bootstrap it with the default list of safe-apps.\r\n\r\nDefault database should contain the following data: https://github.com/gnosis/safe-apps-list/blob/development/public/gnosis-default.applist.json\n", "before_files": [{"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass SafeApp(models.Model):\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-16 14:52\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('url', models.URLField()),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}]}
| 948 | 358 |
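The safe-config-service diff above turns `url` into the primary key of `SafeApp`, which is what lets a bootstrap step stay idempotent: loading the default app list twice updates rows instead of duplicating them. The loader below is a hypothetical sketch, not part of this record; the management command and the exact keys of the upstream `gnosis-default.applist.json` file are assumptions, and only the `SafeApp` field names come from the model.

```python
# Hypothetical bootstrap helper; key names mirror the SafeApp model, not the upstream JSON.
import json

from safe_apps.models import SafeApp


def bootstrap_safe_apps(path: str) -> None:
    with open(path) as f:
        apps = json.load(f)
    for app in apps:
        # update_or_create keyed on the primary key keeps repeated runs idempotent
        SafeApp.objects.update_or_create(
            url=app["url"],
            defaults={
                "name": app["name"],
                "icon_url": app["icon_url"],
                "description": app["description"],
                "networks": app["networks"],
            },
        )
```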
gh_patches_debug_38167
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-2031
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Self-Implemented Metrics Behaved Weirdly in DDP Mode
## 🐛 Bug
Self-implemented nDCG@5 is larger than nDCG@1 in DDP mode. What I was trying to do here is to record the sum of sample-wise scores **_s_** and the number of samples **_n_**. At the end of batch/epoch, I can obtain the mean of nDCG@k by dividing **_s_** by **_n_**.
My implementation looks very similar to the example on [IMPLEMENTING A METRIC](https://torchmetrics.readthedocs.io/en/stable/pages/implement.html). However, It failed in distributed settings.
`Epoch 8: 60%|██████ | 1541/2555 [04:58<03:16, 5.17it/s, loss=0.00354, v_num=213259, train_ndcg@1=0.656, train_ndcg@3=0.675, train_ndcg@5=0.698,...`
### To Reproduce
<details>
<summary>Self-implemented Metric</summary>
```python
from torchmetrics.functional.retrieval import retrieval_normalized_dcg
from torchmetrics import Metric
import torch
from torch import Tensor
class NDCG(Metric):
full_state_update = False
def __init__(self, top_k: int):
super().__init__()
self.top_k = top_k
self.add_state("ndcg", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("n", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor):
# preds and target are 2-D tensors.
assert preds.shape == target.shape
self.ndcg += torch.stack([retrieval_normalized_dcg(p, t, k=self.top_k) for p, t in zip(preds, target)]).sum()
self.n += len(preds)
def compute(self):
return self.ndcg / self.n
```
</details>
### Expected behavior
The documentation of self-implemented metric looked too simple and probably missed some details of distributed metrics.
I noticed that you used list and in `compute()` you call mean() to get the average score of a given batch. Can you please explain the difference between these two implementations?
### Environment
- TorchMetrics version (and how you installed TM, e.g. conda, pip, build from source):
0.10.3 (conda-forge)
- Python version:
3.10.12
PyTorch version (e.g., 1.0):
1.13.1
- Any other relevant information such as OS (e.g., Linux):
pytorch-lightning: 1.9.4
Linux: 5.15.0-78-generic
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/functional/retrieval/ndcg.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
20
21
22 def _dcg(target: Tensor) -> Tensor:
23 """Compute Discounted Cumulative Gain for input tensor."""
24 denom = torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0)
25 return (target / denom).sum(dim=-1)
26
27
28 def retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
29 """Compute `Normalized Discounted Cumulative Gain`_ (for information retrieval).
30
31 ``preds`` and ``target`` should be of the same shape and live on the same device.
32 ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,
33 otherwise an error is raised.
34
35 Args:
36 preds: estimated probabilities of each document to be relevant.
37 target: ground truth about each document relevance.
38 top_k: consider only the top k elements (default: ``None``, which considers them all)
39
40 Return:
41 A single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``.
42
43 Raises:
44 ValueError:
45 If ``top_k`` parameter is not `None` or an integer larger than 0
46
47 Example:
48 >>> from torchmetrics.functional.retrieval import retrieval_normalized_dcg
49 >>> preds = torch.tensor([.1, .2, .3, 4, 70])
50 >>> target = torch.tensor([10, 0, 0, 1, 5])
51 >>> retrieval_normalized_dcg(preds, target)
52 tensor(0.6957)
53
54 """
55 preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True)
56
57 top_k = preds.shape[-1] if top_k is None else top_k
58
59 if not (isinstance(top_k, int) and top_k > 0):
60 raise ValueError("`top_k` has to be a positive integer or None")
61
62 sorted_target = target[torch.argsort(preds, dim=-1, descending=True)][:top_k]
63 ideal_target = torch.sort(target, descending=True)[0][:top_k]
64
65 ideal_dcg = _dcg(ideal_target)
66 target_dcg = _dcg(sorted_target)
67
68 # filter undefined scores
69 all_irrelevant = ideal_dcg == 0
70 target_dcg[all_irrelevant] = 0
71 target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant]
72
73 return target_dcg.mean()
74
[end of src/torchmetrics/functional/retrieval/ndcg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/functional/retrieval/ndcg.py b/src/torchmetrics/functional/retrieval/ndcg.py
--- a/src/torchmetrics/functional/retrieval/ndcg.py
+++ b/src/torchmetrics/functional/retrieval/ndcg.py
@@ -19,10 +19,53 @@
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
-def _dcg(target: Tensor) -> Tensor:
- """Compute Discounted Cumulative Gain for input tensor."""
- denom = torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0)
- return (target / denom).sum(dim=-1)
+def _tie_average_dcg(target: Tensor, preds: Tensor, discount_cumsum: Tensor) -> Tensor:
+ """Translated version of sklearns `_tie_average_dcg` function.
+
+ Args:
+ target: ground truth about each document relevance.
+ preds: estimated probabilities of each document to be relevant.
+ discount_cumsum: cumulative sum of the discount.
+
+ Returns:
+ The cumulative gain of the tied elements.
+
+ """
+ _, inv, counts = torch.unique(-preds, return_inverse=True, return_counts=True)
+ ranked = torch.zeros_like(counts, dtype=torch.float32)
+ ranked.scatter_add_(0, inv, target.to(dtype=ranked.dtype))
+ ranked = ranked / counts
+ groups = counts.cumsum(dim=0) - 1
+ discount_sums = torch.zeros_like(counts, dtype=torch.float32)
+ discount_sums[0] = discount_cumsum[groups[0]]
+ discount_sums[1:] = discount_cumsum[groups].diff()
+ return (ranked * discount_sums).sum()
+
+
+def _dcg_sample_scores(target: Tensor, preds: Tensor, top_k: int, ignore_ties: bool) -> Tensor:
+ """Translated version of sklearns `_dcg_sample_scores` function.
+
+ Args:
+ target: ground truth about each document relevance.
+ preds: estimated probabilities of each document to be relevant.
+ top_k: consider only the top k elements
+ ignore_ties: If True, ties are ignored. If False, ties are averaged.
+
+ Returns:
+ The cumulative gain
+
+ """
+ discount = 1.0 / (torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0))
+ discount[top_k:] = 0.0
+
+ if ignore_ties:
+ ranking = preds.argsort(descending=True)
+ ranked = target[ranking]
+ cumulative_gain = (discount * ranked).sum()
+ else:
+ discount_cumsum = discount.cumsum(dim=-1)
+ cumulative_gain = _tie_average_dcg(target, preds, discount_cumsum)
+ return cumulative_gain
def retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:
@@ -59,15 +102,12 @@
if not (isinstance(top_k, int) and top_k > 0):
raise ValueError("`top_k` has to be a positive integer or None")
- sorted_target = target[torch.argsort(preds, dim=-1, descending=True)][:top_k]
- ideal_target = torch.sort(target, descending=True)[0][:top_k]
-
- ideal_dcg = _dcg(ideal_target)
- target_dcg = _dcg(sorted_target)
+ gain = _dcg_sample_scores(target, preds, top_k, ignore_ties=False)
+ normalized_gain = _dcg_sample_scores(target, target, top_k, ignore_ties=True)
# filter undefined scores
- all_irrelevant = ideal_dcg == 0
- target_dcg[all_irrelevant] = 0
- target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant]
+ all_irrelevant = normalized_gain == 0
+ gain[all_irrelevant] = 0
+ gain[~all_irrelevant] /= normalized_gain[~all_irrelevant]
- return target_dcg.mean()
+ return gain.mean()
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/retrieval/ndcg.py b/src/torchmetrics/functional/retrieval/ndcg.py\n--- a/src/torchmetrics/functional/retrieval/ndcg.py\n+++ b/src/torchmetrics/functional/retrieval/ndcg.py\n@@ -19,10 +19,53 @@\n from torchmetrics.utilities.checks import _check_retrieval_functional_inputs\n \n \n-def _dcg(target: Tensor) -> Tensor:\n- \"\"\"Compute Discounted Cumulative Gain for input tensor.\"\"\"\n- denom = torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0)\n- return (target / denom).sum(dim=-1)\n+def _tie_average_dcg(target: Tensor, preds: Tensor, discount_cumsum: Tensor) -> Tensor:\n+ \"\"\"Translated version of sklearns `_tie_average_dcg` function.\n+\n+ Args:\n+ target: ground truth about each document relevance.\n+ preds: estimated probabilities of each document to be relevant.\n+ discount_cumsum: cumulative sum of the discount.\n+\n+ Returns:\n+ The cumulative gain of the tied elements.\n+\n+ \"\"\"\n+ _, inv, counts = torch.unique(-preds, return_inverse=True, return_counts=True)\n+ ranked = torch.zeros_like(counts, dtype=torch.float32)\n+ ranked.scatter_add_(0, inv, target.to(dtype=ranked.dtype))\n+ ranked = ranked / counts\n+ groups = counts.cumsum(dim=0) - 1\n+ discount_sums = torch.zeros_like(counts, dtype=torch.float32)\n+ discount_sums[0] = discount_cumsum[groups[0]]\n+ discount_sums[1:] = discount_cumsum[groups].diff()\n+ return (ranked * discount_sums).sum()\n+\n+\n+def _dcg_sample_scores(target: Tensor, preds: Tensor, top_k: int, ignore_ties: bool) -> Tensor:\n+ \"\"\"Translated version of sklearns `_dcg_sample_scores` function.\n+\n+ Args:\n+ target: ground truth about each document relevance.\n+ preds: estimated probabilities of each document to be relevant.\n+ top_k: consider only the top k elements\n+ ignore_ties: If True, ties are ignored. If False, ties are averaged.\n+\n+ Returns:\n+ The cumulative gain\n+\n+ \"\"\"\n+ discount = 1.0 / (torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0))\n+ discount[top_k:] = 0.0\n+\n+ if ignore_ties:\n+ ranking = preds.argsort(descending=True)\n+ ranked = target[ranking]\n+ cumulative_gain = (discount * ranked).sum()\n+ else:\n+ discount_cumsum = discount.cumsum(dim=-1)\n+ cumulative_gain = _tie_average_dcg(target, preds, discount_cumsum)\n+ return cumulative_gain\n \n \n def retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:\n@@ -59,15 +102,12 @@\n if not (isinstance(top_k, int) and top_k > 0):\n raise ValueError(\"`top_k` has to be a positive integer or None\")\n \n- sorted_target = target[torch.argsort(preds, dim=-1, descending=True)][:top_k]\n- ideal_target = torch.sort(target, descending=True)[0][:top_k]\n-\n- ideal_dcg = _dcg(ideal_target)\n- target_dcg = _dcg(sorted_target)\n+ gain = _dcg_sample_scores(target, preds, top_k, ignore_ties=False)\n+ normalized_gain = _dcg_sample_scores(target, target, top_k, ignore_ties=True)\n \n # filter undefined scores\n- all_irrelevant = ideal_dcg == 0\n- target_dcg[all_irrelevant] = 0\n- target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant]\n+ all_irrelevant = normalized_gain == 0\n+ gain[all_irrelevant] = 0\n+ gain[~all_irrelevant] /= normalized_gain[~all_irrelevant]\n \n- return target_dcg.mean()\n+ return gain.mean()\n", "issue": "Self-Implemented Metrics Behaved Weirdly in DDP Mode\n## \ud83d\udc1b Bug\r\n\r\nSelf-implemented nDCG@5 is larger than nDCG@1 in DDP mode. 
What I was trying to do here is to record the sum of sample-wise scores **_s_** and the number of samples **_n_**. At the end of batch/epoch, I can obtain the mean of nDCG@k by dividing **_s_** by **_n_**.\r\n\r\nMy implementation looks very similar to the example on [IMPLEMENTING A METRIC](https://torchmetrics.readthedocs.io/en/stable/pages/implement.html). However, It failed in distributed settings.\r\n\r\n`Epoch 8: 60%|\u2588\u2588\u2588\u2588\u2588\u2588 | 1541/2555 [04:58<03:16, 5.17it/s, loss=0.00354, v_num=213259, train_ndcg@1=0.656, train_ndcg@3=0.675, train_ndcg@5=0.698,...`\r\n\r\n### To Reproduce\r\n\r\n<details>\r\n <summary>Self-implemented Metric</summary>\r\n\r\n```python\r\nfrom torchmetrics.functional.retrieval import retrieval_normalized_dcg\r\nfrom torchmetrics import Metric\r\nimport torch\r\nfrom torch import Tensor\r\n\r\nclass NDCG(Metric):\r\n full_state_update = False\r\n\r\n def __init__(self, top_k: int):\r\n super().__init__()\r\n self.top_k = top_k\r\n self.add_state(\"ndcg\", default=torch.tensor(0, dtype=torch.float), dist_reduce_fx=\"sum\")\r\n self.add_state(\"n\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\r\n\r\n def update(self, preds: Tensor, target: Tensor):\r\n # preds and target are 2-D tensors.\r\n assert preds.shape == target.shape\r\n\r\n self.ndcg += torch.stack([retrieval_normalized_dcg(p, t, k=self.top_k) for p, t in zip(preds, target)]).sum()\r\n self.n += len(preds)\r\n\r\n def compute(self):\r\n return self.ndcg / self.n\r\n```\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\nThe documentation of self-implemented metric looked too simple and probably missed some details of distributed metrics.\r\n\r\nI noticed that you used list and in `compute()` you call mean() to get the average score of a given batch. Can you please explain the difference between these two implementations?\r\n\r\n### Environment\r\n- TorchMetrics version (and how you installed TM, e.g. conda, pip, build from source):\r\n0.10.3 (conda-forge)\r\n- Python version:\r\n3.10.12\r\nPyTorch version (e.g., 1.0):\r\n1.13.1\r\n- Any other relevant information such as OS (e.g., Linux):\r\npytorch-lightning: 1.9.4\r\nLinux: 5.15.0-78-generic\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_retrieval_functional_inputs\n\n\ndef _dcg(target: Tensor) -> Tensor:\n \"\"\"Compute Discounted Cumulative Gain for input tensor.\"\"\"\n denom = torch.log2(torch.arange(target.shape[-1], device=target.device) + 2.0)\n return (target / denom).sum(dim=-1)\n\n\ndef retrieval_normalized_dcg(preds: Tensor, target: Tensor, top_k: Optional[int] = None) -> Tensor:\n \"\"\"Compute `Normalized Discounted Cumulative Gain`_ (for information retrieval).\n\n ``preds`` and ``target`` should be of the same shape and live on the same device.\n ``target`` must be either `bool` or `integers` and ``preds`` must be ``float``,\n otherwise an error is raised.\n\n Args:\n preds: estimated probabilities of each document to be relevant.\n target: ground truth about each document relevance.\n top_k: consider only the top k elements (default: ``None``, which considers them all)\n\n Return:\n A single-value tensor with the nDCG of the predictions ``preds`` w.r.t. the labels ``target``.\n\n Raises:\n ValueError:\n If ``top_k`` parameter is not `None` or an integer larger than 0\n\n Example:\n >>> from torchmetrics.functional.retrieval import retrieval_normalized_dcg\n >>> preds = torch.tensor([.1, .2, .3, 4, 70])\n >>> target = torch.tensor([10, 0, 0, 1, 5])\n >>> retrieval_normalized_dcg(preds, target)\n tensor(0.6957)\n\n \"\"\"\n preds, target = _check_retrieval_functional_inputs(preds, target, allow_non_binary_target=True)\n\n top_k = preds.shape[-1] if top_k is None else top_k\n\n if not (isinstance(top_k, int) and top_k > 0):\n raise ValueError(\"`top_k` has to be a positive integer or None\")\n\n sorted_target = target[torch.argsort(preds, dim=-1, descending=True)][:top_k]\n ideal_target = torch.sort(target, descending=True)[0][:top_k]\n\n ideal_dcg = _dcg(ideal_target)\n target_dcg = _dcg(sorted_target)\n\n # filter undefined scores\n all_irrelevant = ideal_dcg == 0\n target_dcg[all_irrelevant] = 0\n target_dcg[~all_irrelevant] /= ideal_dcg[~all_irrelevant]\n\n return target_dcg.mean()\n", "path": "src/torchmetrics/functional/retrieval/ndcg.py"}]}
| 2,066 | 943 |
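In the torchmetrics record above, the sum-and-count state pattern from the issue is not the culprit; the golden diff instead rewrites `retrieval_normalized_dcg` to average over tied predictions, following scikit-learn's `_dcg_sample_scores` and `_tie_average_dcg`. A small probe of that tie behaviour is sketched below; exact values depend on the installed version, but with the tie-averaged implementation from the diff the two calls should agree, whereas the earlier argsort-based scoring could return order-dependent results for fully tied predictions.

```python
import torch
from torchmetrics.functional.retrieval import retrieval_normalized_dcg

preds = torch.tensor([0.5, 0.5, 0.5, 0.5])   # every document gets the same score
target_a = torch.tensor([3, 0, 0, 0])
target_b = torch.tensor([0, 0, 0, 3])        # same relevances, different order

print(retrieval_normalized_dcg(preds, target_a, top_k=2))
print(retrieval_normalized_dcg(preds, target_b, top_k=2))
```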
gh_patches_debug_42582
|
rasdani/github-patches
|
git_diff
|
AlexsLemonade__refinebio-1890
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quantpendia command timed out for MUS_MUSCULUS
Error:
```
ubuntu@ip-10-0-127-71:~$ docker logs inspiring_goldberg
2019-11-11 22:18:45,252 i-04bb28b499152d24f [volume: -1] data_refinery_foreman.foreman.management.commands.create_quantpendia INFO [organism: HOMO_SAPIENS] [job_id: 29380081]: Sending compendia job for Organism
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
psycopg2.extensions.QueryCanceledError: canceling statement due to statement timeout
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "manage.py", line 23, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py", line 83, in handle
job = create_job_for_organism(organism)
File "/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py", line 34, in create_job_for_organism
data[experiment.accession_code] = list(samples_with_quantsf)
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 268, in __iter__
self._fetch_all()
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 1186, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/usr/local/lib/python3.5/dist-packages/django/db/models/query.py", line 176, in __iter__
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
File "/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py", line 1017, in results_iter
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
File "/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py", line 1065, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.5/dist-packages/raven/contrib/django/client.py", line 127, in execute
return real_execute(self, sql, params)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 68, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 77, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.5/dist-packages/django/db/utils.py", line 89, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py", line 85, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: canceling statement due to statement timeout
```
Worked for human.
</issue>
<code>
[start of foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py]
1 import sys
2
3 from django.core.management.base import BaseCommand
4
5 from data_refinery_common.job_lookup import ProcessorPipeline
6 from data_refinery_common.logging import get_and_configure_logger
7 from data_refinery_common.message_queue import send_job
8 from data_refinery_common.models import (Dataset, Experiment, Organism,
9 ProcessorJob,
10 ProcessorJobDatasetAssociation)
11 from data_refinery_common.utils import queryset_iterator
12
13 logger = get_and_configure_logger(__name__)
14
15
16 def create_job_for_organism(organism: Organism):
17 """Returns a quantpendia job for the provided organism."""
18 data = {}
19 experiments = Experiment.objects.filter(
20 organisms=organism,
21 samples__results__computedfile__filename='quant.sf'
22 )\
23 .distinct()
24
25 for experiment in queryset_iterator(experiments):
26 # only include the samples from the target organism that have quant.sf files
27 samples_with_quantsf = experiment.samples\
28 .filter(
29 organism=organism,
30 results__computedfile__filename='quant.sf'
31 )\
32 .values_list('accession_code', flat=True)\
33 .distinct()
34 data[experiment.accession_code] = list(samples_with_quantsf)
35
36 job = ProcessorJob()
37 job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
38 job.save()
39
40 dset = Dataset()
41 dset.data = data
42 dset.scale_by = 'NONE'
43 dset.aggregate_by = 'EXPERIMENT'
44 dset.quantile_normalize = False
45 dset.quant_sf_only = True
46 dset.svd_algorithm = 'NONE'
47 dset.save()
48
49 pjda = ProcessorJobDatasetAssociation()
50 pjda.processor_job = job
51 pjda.dataset = dset
52 pjda.save()
53
54 return job
55
56
57 class Command(BaseCommand):
58
59 def add_arguments(self, parser):
60 parser.add_argument(
61 "--organisms",
62 type=str,
63 help=("Comma separated list of organism names."))
64
65 def handle(self, *args, **options):
66 """Create a quantpendia for one or more organisms."""
67 all_organisms = Organism.objects.all().filter(qn_target__isnull=False)
68 if options["organisms"] is not None:
69 organisms = options["organisms"].upper().replace(" ", "_").split(",")
70 all_organisms = all_organisms.filter(name__in=organisms)
71
72 logger.debug('Generating quantpendia for organisms', organisms=all_organisms)
73
74 for organism in all_organisms:
75 # only generate the quantpendia for organisms that have some samples
76 # with quant.sf files.
77 has_quantsf_files = organism.sample_set\
78 .filter(results__computedfile__filename='quant.sf')\
79 .exists()
80 if not has_quantsf_files:
81 continue
82
83 job = create_job_for_organism(organism)
84 logger.info("Sending compendia job for Organism", job_id=str(job.pk), organism=str(organism))
85 send_job(ProcessorPipeline.CREATE_QUANTPENDIA, job)
86
87 sys.exit(0)
88
[end of foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py b/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py
--- a/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py
+++ b/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py
@@ -1,4 +1,5 @@
import sys
+import time
from django.core.management.base import BaseCommand
@@ -8,37 +9,19 @@
from data_refinery_common.models import (Dataset, Experiment, Organism,
ProcessorJob,
ProcessorJobDatasetAssociation)
-from data_refinery_common.utils import queryset_iterator
+from data_refinery_common.utils import queryset_page_iterator
logger = get_and_configure_logger(__name__)
def create_job_for_organism(organism: Organism):
"""Returns a quantpendia job for the provided organism."""
- data = {}
- experiments = Experiment.objects.filter(
- organisms=organism,
- samples__results__computedfile__filename='quant.sf'
- )\
- .distinct()
-
- for experiment in queryset_iterator(experiments):
- # only include the samples from the target organism that have quant.sf files
- samples_with_quantsf = experiment.samples\
- .filter(
- organism=organism,
- results__computedfile__filename='quant.sf'
- )\
- .values_list('accession_code', flat=True)\
- .distinct()
- data[experiment.accession_code] = list(samples_with_quantsf)
-
job = ProcessorJob()
job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value
job.save()
dset = Dataset()
- dset.data = data
+ dset.data = build_dataset(organism)
dset.scale_by = 'NONE'
dset.aggregate_by = 'EXPERIMENT'
dset.quantile_normalize = False
@@ -53,6 +36,42 @@
return job
+def build_dataset(organism: Organism):
+ data = {}
+ experiments = Experiment.objects.filter(
+ organisms=organism,
+ technology='RNA-SEQ',
+ )\
+ .distinct()
+
+ for experiment_page in queryset_page_iterator(experiments):
+ for experiment in experiment_page:
+ # only include the samples from the target organism that have quant.sf files
+ experiment_samples = experiment.samples\
+ .filter(organism=organism, technology='RNA-SEQ')
+ # split the query into two so to avoid timeouts.
+ # assume processed rna-seq samples have a quant.sf file
+ processed_samples_with_quantsf = experiment_samples\
+ .filter(is_processed=True)\
+ .values_list('accession_code', flat=True)
+ # and only check for quant file for unprocessed samples
+ unprocessed_samples_with_quantsf = experiment_samples\
+ .filter(
+ is_processed=False,
+ results__computedfile__filename='quant.sf'
+ )\
+ .values_list('accession_code', flat=True)\
+ .distinct()
+
+ sample_accession_codes = list(processed_samples_with_quantsf) \
+ + list(unprocessed_samples_with_quantsf)
+
+ if (sample_accession_codes):
+ data[experiment.accession_code] = sample_accession_codes
+
+ time.sleep(5)
+
+ return data
class Command(BaseCommand):
@@ -75,7 +94,7 @@
# only generate the quantpendia for organisms that have some samples
# with quant.sf files.
has_quantsf_files = organism.sample_set\
- .filter(results__computedfile__filename='quant.sf')\
+ .filter(technology='RNA-SEQ', results__computedfile__filename='quant.sf')\
.exists()
if not has_quantsf_files:
continue
|
{"golden_diff": "diff --git a/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py b/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py\n--- a/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py\n+++ b/foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py\n@@ -1,4 +1,5 @@\n import sys\n+import time\n \n from django.core.management.base import BaseCommand\n \n@@ -8,37 +9,19 @@\n from data_refinery_common.models import (Dataset, Experiment, Organism,\n ProcessorJob,\n ProcessorJobDatasetAssociation)\n-from data_refinery_common.utils import queryset_iterator\n+from data_refinery_common.utils import queryset_page_iterator\n \n logger = get_and_configure_logger(__name__)\n \n \n def create_job_for_organism(organism: Organism):\n \"\"\"Returns a quantpendia job for the provided organism.\"\"\"\n- data = {}\n- experiments = Experiment.objects.filter(\n- organisms=organism,\n- samples__results__computedfile__filename='quant.sf'\n- )\\\n- .distinct()\n-\n- for experiment in queryset_iterator(experiments):\n- # only include the samples from the target organism that have quant.sf files\n- samples_with_quantsf = experiment.samples\\\n- .filter(\n- organism=organism,\n- results__computedfile__filename='quant.sf'\n- )\\\n- .values_list('accession_code', flat=True)\\\n- .distinct()\n- data[experiment.accession_code] = list(samples_with_quantsf)\n-\n job = ProcessorJob()\n job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value\n job.save()\n \n dset = Dataset()\n- dset.data = data\n+ dset.data = build_dataset(organism)\n dset.scale_by = 'NONE'\n dset.aggregate_by = 'EXPERIMENT'\n dset.quantile_normalize = False\n@@ -53,6 +36,42 @@\n \n return job\n \n+def build_dataset(organism: Organism):\n+ data = {}\n+ experiments = Experiment.objects.filter(\n+ organisms=organism,\n+ technology='RNA-SEQ',\n+ )\\\n+ .distinct()\n+\n+ for experiment_page in queryset_page_iterator(experiments):\n+ for experiment in experiment_page:\n+ # only include the samples from the target organism that have quant.sf files\n+ experiment_samples = experiment.samples\\\n+ .filter(organism=organism, technology='RNA-SEQ')\n+ # split the query into two so to avoid timeouts.\n+ # assume processed rna-seq samples have a quant.sf file\n+ processed_samples_with_quantsf = experiment_samples\\\n+ .filter(is_processed=True)\\\n+ .values_list('accession_code', flat=True)\n+ # and only check for quant file for unprocessed samples\n+ unprocessed_samples_with_quantsf = experiment_samples\\\n+ .filter(\n+ is_processed=False,\n+ results__computedfile__filename='quant.sf'\n+ )\\\n+ .values_list('accession_code', flat=True)\\\n+ .distinct()\n+\n+ sample_accession_codes = list(processed_samples_with_quantsf) \\\n+ + list(unprocessed_samples_with_quantsf)\n+\n+ if (sample_accession_codes):\n+ data[experiment.accession_code] = sample_accession_codes\n+\n+ time.sleep(5)\n+\n+ return data\n \n class Command(BaseCommand):\n \n@@ -75,7 +94,7 @@\n # only generate the quantpendia for organisms that have some samples\n # with quant.sf files.\n has_quantsf_files = organism.sample_set\\\n- .filter(results__computedfile__filename='quant.sf')\\\n+ .filter(technology='RNA-SEQ', results__computedfile__filename='quant.sf')\\\n .exists()\n if not has_quantsf_files:\n continue\n", "issue": "Quantpendia command timmed out for MUS_MUSCULUS\nError:\r\n\r\n```\r\nubuntu@ip-10-0-127-71:~$ docker logs inspiring_goldberg\r\n2019-11-11 22:18:45,252 
i-04bb28b499152d24f [volume: -1] data_refinery_foreman.foreman.management.commands.create_quantpendia INFO [organism: HOMO_SAPIENS] [job_id: 29380081]: Sending compendia job for Organism\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py\", line 85, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.extensions.QueryCanceledError: canceling statement due to statement timeout\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"manage.py\", line 23, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py\", line 375, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/core/management/base.py\", line 316, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/core/management/base.py\", line 353, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py\", line 83, in handle\r\n job = create_job_for_organism(organism)\r\n File \"/home/user/data_refinery_foreman/foreman/management/commands/create_quantpendia.py\", line 34, in create_job_for_organism\r\n data[experiment.accession_code] = list(samples_with_quantsf)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/models/query.py\", line 268, in __iter__\r\n self._fetch_all()\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/models/query.py\", line 1186, in _fetch_all\r\n self._result_cache = list(self._iterable_class(self))\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/models/query.py\", line 176, in __iter__\r\n for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py\", line 1017, in results_iter\r\n results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/models/sql/compiler.py\", line 1065, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.5/dist-packages/raven/contrib/django/client.py\", line 127, in execute\r\n return real_execute(self, sql, params)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py\", line 68, in execute\r\n return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py\", line 77, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py\", line 85, in _execute\r\n return self.cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/utils.py\", line 89, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/usr/local/lib/python3.5/dist-packages/django/db/backends/utils.py\", line 85, in _execute\r\n return self.cursor.execute(sql, params)\r\ndjango.db.utils.OperationalError: canceling statement due to statement timeout\r\n```\r\n\r\nWorked for human.\n", 
"before_files": [{"content": "import sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom data_refinery_common.job_lookup import ProcessorPipeline\nfrom data_refinery_common.logging import get_and_configure_logger\nfrom data_refinery_common.message_queue import send_job\nfrom data_refinery_common.models import (Dataset, Experiment, Organism,\n ProcessorJob,\n ProcessorJobDatasetAssociation)\nfrom data_refinery_common.utils import queryset_iterator\n\nlogger = get_and_configure_logger(__name__)\n\n\ndef create_job_for_organism(organism: Organism):\n \"\"\"Returns a quantpendia job for the provided organism.\"\"\"\n data = {}\n experiments = Experiment.objects.filter(\n organisms=organism,\n samples__results__computedfile__filename='quant.sf'\n )\\\n .distinct()\n\n for experiment in queryset_iterator(experiments):\n # only include the samples from the target organism that have quant.sf files\n samples_with_quantsf = experiment.samples\\\n .filter(\n organism=organism,\n results__computedfile__filename='quant.sf'\n )\\\n .values_list('accession_code', flat=True)\\\n .distinct()\n data[experiment.accession_code] = list(samples_with_quantsf)\n\n job = ProcessorJob()\n job.pipeline_applied = ProcessorPipeline.CREATE_QUANTPENDIA.value\n job.save()\n\n dset = Dataset()\n dset.data = data\n dset.scale_by = 'NONE'\n dset.aggregate_by = 'EXPERIMENT'\n dset.quantile_normalize = False\n dset.quant_sf_only = True\n dset.svd_algorithm = 'NONE'\n dset.save()\n\n pjda = ProcessorJobDatasetAssociation()\n pjda.processor_job = job\n pjda.dataset = dset\n pjda.save()\n\n return job\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--organisms\",\n type=str,\n help=(\"Comma separated list of organism names.\"))\n\n def handle(self, *args, **options):\n \"\"\"Create a quantpendia for one or more organisms.\"\"\"\n all_organisms = Organism.objects.all().filter(qn_target__isnull=False)\n if options[\"organisms\"] is not None:\n organisms = options[\"organisms\"].upper().replace(\" \", \"_\").split(\",\")\n all_organisms = all_organisms.filter(name__in=organisms)\n\n logger.debug('Generating quantpendia for organisms', organisms=all_organisms)\n\n for organism in all_organisms:\n # only generate the quantpendia for organisms that have some samples\n # with quant.sf files.\n has_quantsf_files = organism.sample_set\\\n .filter(results__computedfile__filename='quant.sf')\\\n .exists()\n if not has_quantsf_files:\n continue\n\n job = create_job_for_organism(organism)\n logger.info(\"Sending compendia job for Organism\", job_id=str(job.pk), organism=str(organism))\n send_job(ProcessorPipeline.CREATE_QUANTPENDIA, job)\n\n sys.exit(0)\n", "path": "foreman/data_refinery_foreman/foreman/management/commands/create_quantpendia.py"}]}
| 2,383 | 877 |
gh_patches_debug_23458
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-8202
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ticket Invoices - add invoice number
Add an invoice number to the heading of ticket invoices (same as in organizer invoices) and show the "app name" above the invoice.
Please implement this as follows:
* Display app name, e.g. eventyay on top
* Then show heading "Invoice" and invoice number
* Make the top heading "Invoice" translatable
* Add an invoice number behind "Invoice" in the following format: YYYYE-[eventID]000001 (show the actual event ID in the event ID area)
* Make the numbers per event only (e.g. if an organizer has two different events, each event starts with 000001).
* On the first of January the numbering starts again at 000001.
(Screenshot attached to the original issue omitted.)
</issue>
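The reference patch further down in this problem mainly threads the instance settings (the app name) into the invoice template, so the numbering scheme itself is only specified by the requirements above. As a rough illustration of that format, the function below derives a YYYYE-[eventID]NNNNNN number with a per-event counter that restarts each calendar year; the helper name and its in-memory input are assumptions made for the example, not code from the repository.

```python
from datetime import date


def next_invoice_number(event_id, previous_numbers, today=None):
    """Return the next invoice number in the form YYYYE-<eventID><counter>.

    `previous_numbers` is an in-memory list of numbers already issued for
    this event; a real implementation would query the orders table instead.
    The counter is six digits wide and restarts at 000001 each calendar year.
    """
    today = today or date.today()
    prefix = f"{today.year}E-{event_id}"
    # Count only numbers issued for this event in the current year.
    issued_this_year = [n for n in previous_numbers if n.startswith(prefix)]
    counter = len(issued_this_year) + 1
    return f"{prefix}{counter:06d}"


if __name__ == "__main__":
    issued = ["2024E-7000001", "2024E-7000002"]
    print(next_invoice_number(7, issued, today=date(2024, 3, 1)))  # 2024E-7000003
    print(next_invoice_number(7, issued, today=date(2025, 1, 1)))  # 2025E-7000001
```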
<code>
[start of app/api/helpers/order.py]
1 import logging
2 from datetime import datetime, timedelta, timezone
3
4 from flask import render_template
5 from flask_rest_jsonapi.exceptions import ObjectNotFound
6
7 from app.api.helpers.db import (
8 get_count,
9 safe_query_without_soft_deleted_entries,
10 save_to_db,
11 )
12 from app.api.helpers.errors import ConflictError, UnprocessableEntityError
13 from app.api.helpers.files import create_save_pdf
14 from app.api.helpers.mail import (
15 send_email_to_attendees,
16 send_order_purchase_organizer_email,
17 )
18 from app.api.helpers.notification import (
19 notify_ticket_purchase_attendee,
20 notify_ticket_purchase_organizer,
21 )
22 from app.api.helpers.storage import UPLOAD_PATHS
23 from app.models import db
24 from app.models.order import OrderTicket
25 from app.models.ticket import Ticket
26 from app.models.ticket_fee import TicketFees
27 from app.models.ticket_holder import TicketHolder
28 from app.settings import get_settings
29
30
31 def delete_related_attendees_for_order(order):
32 """
33 Delete the associated attendees of an order when it is cancelled/deleted/expired
34 :param order: Order whose attendees have to be deleted.
35 :return:
36 """
37 for ticket_holder in order.ticket_holders:
38 db.session.delete(ticket_holder)
39 try:
40 db.session.commit()
41 except Exception:
42 logging.exception('DB Exception!')
43 db.session.rollback()
44
45
46 def set_expiry_for_order(order, override=False):
47 """
48 Expire the order after the time slot(10 minutes) if the order is initializing.
49 Also expires the order if we want to expire an order regardless of the state and time.
50 :param order: Order to be expired.
51 :param override: flag to force expiry.
52 :return:
53 """
54 order_expiry_time = get_settings()['order_expiry_time']
55 if (
56 order
57 and not order.paid_via
58 and (
59 override
60 or (
61 order.status == 'initializing'
62 and (order.created_at + timedelta(minutes=order_expiry_time))
63 < datetime.now(timezone.utc)
64 )
65 )
66 ):
67 order.status = 'expired'
68 delete_related_attendees_for_order(order)
69 save_to_db(order)
70 return order
71
72
73 def create_pdf_tickets_for_holder(order):
74 """
75 Create tickets and invoices for the holders of an order.
76 :param order: The order for which to create tickets for.
77 """
78 if order.status == 'completed' or order.status == 'placed':
79 pdf = create_save_pdf(
80 render_template('pdf/ticket_purchaser.html', order=order),
81 UPLOAD_PATHS['pdf']['tickets_all'],
82 dir_path='/static/uploads/pdf/tickets/',
83 identifier=order.identifier,
84 extra_identifiers={'extra_identifier': order.identifier},
85 upload_dir='generated/tickets/',
86 )
87
88 order.tickets_pdf_url = pdf
89
90 for holder in order.ticket_holders:
91 # create attendee pdf for every ticket holder
92 pdf = create_save_pdf(
93 render_template('pdf/ticket_attendee.html', order=order, holder=holder),
94 UPLOAD_PATHS['pdf']['tickets_all'],
95 dir_path='/static/uploads/pdf/tickets/',
96 identifier=order.identifier,
97 extra_identifiers={'extra_identifier': holder.id},
98 upload_dir='generated/tickets/',
99 )
100 holder.pdf_url = pdf
101 save_to_db(holder)
102
103 # create order invoices pdf
104 order_tickets = OrderTicket.query.filter_by(order_id=order.id).all()
105
106 create_save_pdf(
107 render_template(
108 'pdf/order_invoice.html',
109 order=order,
110 event=order.event,
111 tax=order.event.tax,
112 order_tickets=order_tickets,
113 ),
114 UPLOAD_PATHS['pdf']['order'],
115 dir_path='/static/uploads/pdf/tickets/',
116 identifier=order.identifier,
117 upload_dir='generated/invoices/',
118 new_renderer=True,
119 )
120 save_to_db(order)
121
122
123 def create_onsite_attendees_for_order(data):
124 """
125 Creates on site ticket holders for an order and adds it into the request data.
126 :param data: data initially passed in the POST request for order.
127 :return:
128 """
129 on_site_tickets = data.get('on_site_tickets')
130
131 if not on_site_tickets:
132 raise UnprocessableEntityError(
133 {'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing'
134 )
135
136 data['ticket_holders'] = []
137
138 for on_site_ticket in on_site_tickets:
139 ticket_id = on_site_ticket['id']
140 quantity = int(on_site_ticket['quantity'])
141
142 ticket = safe_query_without_soft_deleted_entries(
143 Ticket, 'id', ticket_id, 'ticket_id'
144 )
145
146 ticket_sold_count = get_count(
147 db.session.query(TicketHolder.id).filter_by(
148 ticket_id=int(ticket.id), deleted_at=None
149 )
150 )
151
152 # Check if the ticket is already sold out or not.
153 if ticket_sold_count + quantity > ticket.quantity:
154 # delete the already created attendees.
155 for holder in data['ticket_holders']:
156 ticket_holder = (
157 db.session.query(TicketHolder).filter(id == int(holder)).one()
158 )
159 db.session.delete(ticket_holder)
160 try:
161 db.session.commit()
162 except Exception:
163 logging.exception('DB Exception!')
164 db.session.rollback()
165
166 raise ConflictError(
167 {'pointer': '/data/attributes/on_site_tickets'},
168 "Ticket with id: {} already sold out. You can buy at most {} tickets".format(
169 ticket_id, ticket.quantity - ticket_sold_count
170 ),
171 )
172
173 for _ in range(1, quantity):
174 ticket_holder = TicketHolder(
175 firstname='onsite',
176 lastname='attendee',
177 email='[email protected]',
178 ticket_id=ticket.id,
179 event_id=data.get('event'),
180 )
181 save_to_db(ticket_holder)
182 data['ticket_holders'].append(ticket_holder.id)
183
184 # delete from the data.
185 del data['on_site_tickets']
186
187
188 def calculate_order_amount(tickets, discount_code=None):
189 from app.api.helpers.ticketing import validate_discount_code, validate_tickets
190 from app.models.discount_code import DiscountCode
191
192 ticket_ids = {ticket['id'] for ticket in tickets}
193 ticket_map = {int(ticket['id']): ticket for ticket in tickets}
194 fetched_tickets = validate_tickets(ticket_ids)
195
196 if tickets and discount_code:
197 discount_code = validate_discount_code(discount_code, tickets=tickets)
198
199 event = tax = tax_included = fees = None
200 total_amount = total_tax = total_discount = 0.0
201 ticket_list = []
202 for ticket in fetched_tickets:
203 ticket_info = ticket_map[ticket.id]
204 discount_amount = 0.0
205 discount_data = None
206 ticket_fee = 0.0
207
208 quantity = ticket_info.get('quantity', 1) # Default to single ticket
209 if not event:
210 event = ticket.event
211
212 if event.deleted_at:
213 raise ObjectNotFound(
214 {'pointer': 'tickets/event'}, f'Event: {event.id} not found'
215 )
216
217 fees = TicketFees.query.filter_by(currency=event.payment_currency).first()
218
219 if not tax and event.tax:
220 tax = event.tax
221 tax_included = tax.is_tax_included_in_price
222
223 if ticket.type == 'donation':
224 price = ticket_info.get('price')
225 if not price or price > ticket.max_price or price < ticket.min_price:
226 raise UnprocessableEntityError(
227 {'pointer': 'tickets/price'},
228 f"Price for donation ticket should be present and within range "
229 f"{ticket.min_price} to {ticket.max_price}",
230 )
231 else:
232 price = ticket.price if ticket.type != 'free' else 0.0
233
234 if discount_code and ticket.type != 'free':
235 code = (
236 DiscountCode.query.with_parent(ticket)
237 .filter_by(id=discount_code.id)
238 .first()
239 )
240 if code:
241 if discount_code.id == code.id:
242 if code.type == 'amount':
243 discount_amount = min(code.value, price)
244 discount_percent = (discount_amount / price) * 100
245 else:
246 discount_amount = (price * code.value) / 100
247 discount_percent = code.value
248 discount_data = {
249 'code': discount_code.code,
250 'percent': round(discount_percent, 2),
251 'amount': round(discount_amount, 2),
252 'total': round(discount_amount * quantity, 2),
253 }
254
255 total_discount += round(discount_amount * quantity, 2)
256 if fees and not ticket.is_fee_absorbed:
257 ticket_fee = fees.service_fee * (price * quantity) / 100
258 if ticket_fee > fees.maximum_fee:
259 ticket_fee = fees.maximum_fee
260 sub_total = ticket_fee + (price - discount_amount) * quantity
261 total_amount = total_amount + sub_total
262 ticket_list.append(
263 {
264 'id': ticket.id,
265 'name': ticket.name,
266 'price': price,
267 'quantity': quantity,
268 'discount': discount_data,
269 'ticket_fee': round(ticket_fee, 2),
270 'sub_total': round(sub_total, 2),
271 }
272 )
273
274 sub_total = total_amount
275 tax_dict = None
276 if tax:
277 if tax_included:
278 total_tax = total_amount - total_amount / (1 + tax.rate / 100)
279 else:
280 total_tax = total_amount * tax.rate / 100
281 total_amount += total_tax
282 tax_dict = dict(
283 included=tax_included,
284 amount=round(total_tax, 2),
285 percent=tax.rate if tax else 0.0,
286 name=tax.name,
287 )
288
289 return dict(
290 tax=tax_dict,
291 sub_total=round(sub_total, 2),
292 total=round(total_amount, 2),
293 discount=round(total_discount, 2),
294 tickets=ticket_list,
295 )
296
297
298 def on_order_completed(order):
299 # send e-mail and notifications if the order status is completed
300 if not (order.status == 'completed' or order.status == 'placed'):
301 return
302
303 create_pdf_tickets_for_holder(order)
304
305 # send email and notifications.
306 send_email_to_attendees(order)
307 notify_ticket_purchase_attendee(order)
308
309 if order.payment_mode in ['free', 'bank', 'cheque', 'onsite']:
310 order.completed_at = datetime.utcnow()
311
312 organizer_set = set(
313 filter(
314 bool, order.event.organizers + order.event.coorganizers + [order.event.owner]
315 )
316 )
317 send_order_purchase_organizer_email(order, organizer_set)
318 notify_ticket_purchase_organizer(order)
319
[end of app/api/helpers/order.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py
--- a/app/api/helpers/order.py
+++ b/app/api/helpers/order.py
@@ -25,6 +25,7 @@
from app.models.ticket import Ticket
from app.models.ticket_fee import TicketFees
from app.models.ticket_holder import TicketHolder
+from app.models.setting import Setting
from app.settings import get_settings
@@ -99,6 +100,8 @@
)
holder.pdf_url = pdf
save_to_db(holder)
+
+ admin_info = Setting.query.first()
# create order invoices pdf
order_tickets = OrderTicket.query.filter_by(order_id=order.id).all()
@@ -110,6 +113,7 @@
event=order.event,
tax=order.event.tax,
order_tickets=order_tickets,
+ admin_info=admin_info,
),
UPLOAD_PATHS['pdf']['order'],
dir_path='/static/uploads/pdf/tickets/',
|
{"golden_diff": "diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py\n--- a/app/api/helpers/order.py\n+++ b/app/api/helpers/order.py\n@@ -25,6 +25,7 @@\n from app.models.ticket import Ticket\n from app.models.ticket_fee import TicketFees\n from app.models.ticket_holder import TicketHolder\n+from app.models.setting import Setting\n from app.settings import get_settings\n \n \n@@ -99,6 +100,8 @@\n )\n holder.pdf_url = pdf\n save_to_db(holder)\n+ \n+ admin_info = Setting.query.first()\n \n # create order invoices pdf\n order_tickets = OrderTicket.query.filter_by(order_id=order.id).all()\n@@ -110,6 +113,7 @@\n event=order.event,\n tax=order.event.tax,\n order_tickets=order_tickets,\n+ admin_info=admin_info,\n ),\n UPLOAD_PATHS['pdf']['order'],\n dir_path='/static/uploads/pdf/tickets/',\n", "issue": "Ticket Invoices - add invoice number\nAdd an invoice number to ticket invoices into the heading - same as in organizer invoices and show \"app name\" above the invoice.\r\n\r\nPlease implement this as follows:\r\n\r\n* Display app name, e.g. eventyay on top\r\n* Then show heading \"Invoice\" and invoice number\r\n* Make the top heading \"Invoice\" translatable\r\n* Add an invoice number behind \"Invoice\" in the following format: YYYYE-[eventID]000001 (show the actual event ID in the event ID area)\r\n* Make the numbers according to the event only (e.g. if an organizer has two different events each event starts with 000001.\r\n* On the first of January the format is starting again with 00001.\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timedelta, timezone\n\nfrom flask import render_template\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.db import (\n get_count,\n safe_query_without_soft_deleted_entries,\n save_to_db,\n)\nfrom app.api.helpers.errors import ConflictError, UnprocessableEntityError\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.mail import (\n send_email_to_attendees,\n send_order_purchase_organizer_email,\n)\nfrom app.api.helpers.notification import (\n notify_ticket_purchase_attendee,\n notify_ticket_purchase_organizer,\n)\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.order import OrderTicket\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import TicketFees\nfrom app.models.ticket_holder import TicketHolder\nfrom app.settings import get_settings\n\n\ndef delete_related_attendees_for_order(order):\n \"\"\"\n Delete the associated attendees of an order when it is cancelled/deleted/expired\n :param order: Order whose attendees have to be deleted.\n :return:\n \"\"\"\n for ticket_holder in order.ticket_holders:\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception:\n logging.exception('DB Exception!')\n db.session.rollback()\n\n\ndef set_expiry_for_order(order, override=False):\n \"\"\"\n Expire the order after the time slot(10 minutes) if the order is initializing.\n Also expires the order if we want to expire an order regardless of the state and time.\n :param order: Order to be expired.\n :param override: flag to force expiry.\n :return:\n \"\"\"\n order_expiry_time = get_settings()['order_expiry_time']\n if (\n order\n and not order.paid_via\n and (\n override\n or (\n order.status == 'initializing'\n and (order.created_at + timedelta(minutes=order_expiry_time))\n < datetime.now(timezone.utc)\n )\n )\n ):\n order.status = 'expired'\n 
delete_related_attendees_for_order(order)\n save_to_db(order)\n return order\n\n\ndef create_pdf_tickets_for_holder(order):\n \"\"\"\n Create tickets and invoices for the holders of an order.\n :param order: The order for which to create tickets for.\n \"\"\"\n if order.status == 'completed' or order.status == 'placed':\n pdf = create_save_pdf(\n render_template('pdf/ticket_purchaser.html', order=order),\n UPLOAD_PATHS['pdf']['tickets_all'],\n dir_path='/static/uploads/pdf/tickets/',\n identifier=order.identifier,\n extra_identifiers={'extra_identifier': order.identifier},\n upload_dir='generated/tickets/',\n )\n\n order.tickets_pdf_url = pdf\n\n for holder in order.ticket_holders:\n # create attendee pdf for every ticket holder\n pdf = create_save_pdf(\n render_template('pdf/ticket_attendee.html', order=order, holder=holder),\n UPLOAD_PATHS['pdf']['tickets_all'],\n dir_path='/static/uploads/pdf/tickets/',\n identifier=order.identifier,\n extra_identifiers={'extra_identifier': holder.id},\n upload_dir='generated/tickets/',\n )\n holder.pdf_url = pdf\n save_to_db(holder)\n\n # create order invoices pdf\n order_tickets = OrderTicket.query.filter_by(order_id=order.id).all()\n\n create_save_pdf(\n render_template(\n 'pdf/order_invoice.html',\n order=order,\n event=order.event,\n tax=order.event.tax,\n order_tickets=order_tickets,\n ),\n UPLOAD_PATHS['pdf']['order'],\n dir_path='/static/uploads/pdf/tickets/',\n identifier=order.identifier,\n upload_dir='generated/invoices/',\n new_renderer=True,\n )\n save_to_db(order)\n\n\ndef create_onsite_attendees_for_order(data):\n \"\"\"\n Creates on site ticket holders for an order and adds it into the request data.\n :param data: data initially passed in the POST request for order.\n :return:\n \"\"\"\n on_site_tickets = data.get('on_site_tickets')\n\n if not on_site_tickets:\n raise UnprocessableEntityError(\n {'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing'\n )\n\n data['ticket_holders'] = []\n\n for on_site_ticket in on_site_tickets:\n ticket_id = on_site_ticket['id']\n quantity = int(on_site_ticket['quantity'])\n\n ticket = safe_query_without_soft_deleted_entries(\n Ticket, 'id', ticket_id, 'ticket_id'\n )\n\n ticket_sold_count = get_count(\n db.session.query(TicketHolder.id).filter_by(\n ticket_id=int(ticket.id), deleted_at=None\n )\n )\n\n # Check if the ticket is already sold out or not.\n if ticket_sold_count + quantity > ticket.quantity:\n # delete the already created attendees.\n for holder in data['ticket_holders']:\n ticket_holder = (\n db.session.query(TicketHolder).filter(id == int(holder)).one()\n )\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception:\n logging.exception('DB Exception!')\n db.session.rollback()\n\n raise ConflictError(\n {'pointer': '/data/attributes/on_site_tickets'},\n \"Ticket with id: {} already sold out. 
You can buy at most {} tickets\".format(\n ticket_id, ticket.quantity - ticket_sold_count\n ),\n )\n\n for _ in range(1, quantity):\n ticket_holder = TicketHolder(\n firstname='onsite',\n lastname='attendee',\n email='[email protected]',\n ticket_id=ticket.id,\n event_id=data.get('event'),\n )\n save_to_db(ticket_holder)\n data['ticket_holders'].append(ticket_holder.id)\n\n # delete from the data.\n del data['on_site_tickets']\n\n\ndef calculate_order_amount(tickets, discount_code=None):\n from app.api.helpers.ticketing import validate_discount_code, validate_tickets\n from app.models.discount_code import DiscountCode\n\n ticket_ids = {ticket['id'] for ticket in tickets}\n ticket_map = {int(ticket['id']): ticket for ticket in tickets}\n fetched_tickets = validate_tickets(ticket_ids)\n\n if tickets and discount_code:\n discount_code = validate_discount_code(discount_code, tickets=tickets)\n\n event = tax = tax_included = fees = None\n total_amount = total_tax = total_discount = 0.0\n ticket_list = []\n for ticket in fetched_tickets:\n ticket_info = ticket_map[ticket.id]\n discount_amount = 0.0\n discount_data = None\n ticket_fee = 0.0\n\n quantity = ticket_info.get('quantity', 1) # Default to single ticket\n if not event:\n event = ticket.event\n\n if event.deleted_at:\n raise ObjectNotFound(\n {'pointer': 'tickets/event'}, f'Event: {event.id} not found'\n )\n\n fees = TicketFees.query.filter_by(currency=event.payment_currency).first()\n\n if not tax and event.tax:\n tax = event.tax\n tax_included = tax.is_tax_included_in_price\n\n if ticket.type == 'donation':\n price = ticket_info.get('price')\n if not price or price > ticket.max_price or price < ticket.min_price:\n raise UnprocessableEntityError(\n {'pointer': 'tickets/price'},\n f\"Price for donation ticket should be present and within range \"\n f\"{ticket.min_price} to {ticket.max_price}\",\n )\n else:\n price = ticket.price if ticket.type != 'free' else 0.0\n\n if discount_code and ticket.type != 'free':\n code = (\n DiscountCode.query.with_parent(ticket)\n .filter_by(id=discount_code.id)\n .first()\n )\n if code:\n if discount_code.id == code.id:\n if code.type == 'amount':\n discount_amount = min(code.value, price)\n discount_percent = (discount_amount / price) * 100\n else:\n discount_amount = (price * code.value) / 100\n discount_percent = code.value\n discount_data = {\n 'code': discount_code.code,\n 'percent': round(discount_percent, 2),\n 'amount': round(discount_amount, 2),\n 'total': round(discount_amount * quantity, 2),\n }\n\n total_discount += round(discount_amount * quantity, 2)\n if fees and not ticket.is_fee_absorbed:\n ticket_fee = fees.service_fee * (price * quantity) / 100\n if ticket_fee > fees.maximum_fee:\n ticket_fee = fees.maximum_fee\n sub_total = ticket_fee + (price - discount_amount) * quantity\n total_amount = total_amount + sub_total\n ticket_list.append(\n {\n 'id': ticket.id,\n 'name': ticket.name,\n 'price': price,\n 'quantity': quantity,\n 'discount': discount_data,\n 'ticket_fee': round(ticket_fee, 2),\n 'sub_total': round(sub_total, 2),\n }\n )\n\n sub_total = total_amount\n tax_dict = None\n if tax:\n if tax_included:\n total_tax = total_amount - total_amount / (1 + tax.rate / 100)\n else:\n total_tax = total_amount * tax.rate / 100\n total_amount += total_tax\n tax_dict = dict(\n included=tax_included,\n amount=round(total_tax, 2),\n percent=tax.rate if tax else 0.0,\n name=tax.name,\n )\n\n return dict(\n tax=tax_dict,\n sub_total=round(sub_total, 2),\n total=round(total_amount, 2),\n 
discount=round(total_discount, 2),\n tickets=ticket_list,\n )\n\n\ndef on_order_completed(order):\n # send e-mail and notifications if the order status is completed\n if not (order.status == 'completed' or order.status == 'placed'):\n return\n\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications.\n send_email_to_attendees(order)\n notify_ticket_purchase_attendee(order)\n\n if order.payment_mode in ['free', 'bank', 'cheque', 'onsite']:\n order.completed_at = datetime.utcnow()\n\n organizer_set = set(\n filter(\n bool, order.event.organizers + order.event.coorganizers + [order.event.owner]\n )\n )\n send_order_purchase_organizer_email(order, organizer_set)\n notify_ticket_purchase_organizer(order)\n", "path": "app/api/helpers/order.py"}]}
| 3,925 | 212 |
gh_patches_debug_11601
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-596
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not hardcode directory to `rundir` for globus tokens
Currently the token file is hardcoded to `TOKEN_FILE = 'runinfo/.globus.json'`. This will break if the user uses a non-default rundir. I vote not to couple it to the rundir so that it can be re-used between different scripts with different rundirs without requiring re-authentication, for example, `$HOME/.parsl/globus.json`.
</issue>
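The change this asks for is small: derive the token path from the user's home directory instead of the run directory. The patch shown further down in this problem does exactly that at import time; the function below is a standalone sketch of the same idea, with the directory and file names as illustrative defaults rather than values taken from the repository.

```python
import os


def default_token_file(app_dir=".parsl", file_name=".globus.json"):
    """Return a per-user token path such as ~/.parsl/.globus.json.

    Keeping the file under the home directory decouples it from any
    particular run directory, so tokens can be reused across scripts
    without re-authenticating.
    """
    token_dir = os.path.join(os.path.expanduser("~"), app_dir)
    os.makedirs(token_dir, exist_ok=True)  # create the directory on first use
    return os.path.join(token_dir, file_name)


if __name__ == "__main__":
    print(default_token_file())
```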
<code>
[start of parsl/data_provider/globus.py]
1 import logging
2 import json
3 import globus_sdk
4
5
6 logger = logging.getLogger(__name__)
7 # Add StreamHandler to print error Globus events to stderr
8 handler = logging.StreamHandler()
9 handler.setLevel(logging.WARN)
10 format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
11 formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
12 handler.setFormatter(formatter)
13 logger.addHandler(handler)
14
15
16 """
17 'Parsl Application' OAuth2 client registered with Globus Auth
18 by [email protected]
19 """
20 CLIENT_ID = '8b8060fd-610e-4a74-885e-1051c71ad473'
21 REDIRECT_URI = 'https://auth.globus.org/v2/web/auth-code'
22 SCOPES = ('openid '
23 'urn:globus:auth:scope:transfer.api.globus.org:all')
24
25 TOKEN_FILE = 'runinfo/.globus.json'
26
27
28 get_input = getattr(__builtins__, 'raw_input', input)
29
30
31 def _load_tokens_from_file(filepath):
32 with open(filepath, 'r') as f:
33 tokens = json.load(f)
34 return tokens
35
36
37 def _save_tokens_to_file(filepath, tokens):
38 with open(filepath, 'w') as f:
39 json.dump(tokens, f)
40
41
42 def _update_tokens_file_on_refresh(token_response):
43 _save_tokens_to_file(TOKEN_FILE, token_response.by_resource_server)
44
45
46 def _do_native_app_authentication(client_id, redirect_uri,
47 requested_scopes=None):
48
49 client = globus_sdk.NativeAppAuthClient(client_id=client_id)
50 client.oauth2_start_flow(
51 requested_scopes=requested_scopes,
52 redirect_uri=redirect_uri,
53 refresh_tokens=True)
54
55 url = client.oauth2_get_authorize_url()
56 print('Please visit the following URL to provide authorization: \n{}'.format(url))
57 auth_code = get_input('Enter the auth code: ').strip()
58 token_response = client.oauth2_exchange_code_for_tokens(auth_code)
59 return token_response.by_resource_server
60
61
62 def _get_native_app_authorizer(client_id):
63 tokens = None
64 try:
65 tokens = _load_tokens_from_file(TOKEN_FILE)
66 except Exception:
67 pass
68
69 if not tokens:
70 tokens = _do_native_app_authentication(
71 client_id=client_id,
72 redirect_uri=REDIRECT_URI,
73 requested_scopes=SCOPES)
74 try:
75 _save_tokens_to_file(TOKEN_FILE, tokens)
76 except Exception:
77 pass
78
79 transfer_tokens = tokens['transfer.api.globus.org']
80
81 auth_client = globus_sdk.NativeAppAuthClient(client_id=client_id)
82
83 return globus_sdk.RefreshTokenAuthorizer(
84 transfer_tokens['refresh_token'],
85 auth_client,
86 access_token=transfer_tokens['access_token'],
87 expires_at=transfer_tokens['expires_at_seconds'],
88 on_refresh=_update_tokens_file_on_refresh)
89
90
91 def get_globus():
92 Globus.init()
93 return Globus()
94
95
96 class Globus(object):
97 """
98 All communication with the Globus Auth and Globus Transfer services is enclosed
99 in the Globus class. In particular, the Globus class is reponsible for:
100 - managing an OAuth2 authorizer - getting access and refresh tokens,
101 refreshing an access token, storing to and retrieving tokens from
102 .globus.json file,
103 - submitting file transfers,
104 - monitoring transfers.
105 """
106
107 authorizer = None
108
109 @classmethod
110 def init(cls):
111 if cls.authorizer:
112 return
113 cls.authorizer = _get_native_app_authorizer(CLIENT_ID)
114
115 @classmethod
116 def get_authorizer(cls):
117 return cls.authorizer
118
119 @classmethod
120 def transfer_file(cls, src_ep, dst_ep, src_path, dst_path):
121 tc = globus_sdk.TransferClient(authorizer=cls.authorizer)
122 td = globus_sdk.TransferData(tc, src_ep, dst_ep)
123 td.add_item(src_path, dst_path)
124 try:
125 task = tc.submit_transfer(td)
126 except Exception as e:
127 raise Exception('Globus transfer from {}{} to {}{} failed due to error: {}'.format(
128 src_ep, src_path, dst_ep, dst_path, e))
129
130 last_event_time = None
131 """
132 A Globus transfer job (task) can be in one of the three states: ACTIVE, SUCCEEDED, FAILED.
133 Parsl every 20 seconds polls a status of the transfer job (task) from the Globus Transfer service,
134 with 60 second timeout limit. If the task is ACTIVE after time runs out 'task_wait' returns False,
135 and True otherwise.
136 """
137 while not tc.task_wait(task['task_id'], 60, 15):
138 task = tc.get_task(task['task_id'])
139 # Get the last error Globus event
140 events = tc.task_event_list(task['task_id'], num_results=1, filter='is_error:1')
141 event = events.data[0]
142 # Print the error event to stderr and Parsl file log if it was not yet printed
143 if event['time'] != last_event_time:
144 last_event_time = event['time']
145 logger.warn('Non-critical Globus Transfer error event for globus://{}{}: "{}" at {}. Retrying...'.format(
146 src_ep, src_path, event['description'], event['time']))
147 logger.debug('Globus Transfer error details: {}'.format(event['details']))
148
149 """
150 The Globus transfer job (task) has been terminated (is not ACTIVE). Check if the transfer
151 SUCCEEDED or FAILED.
152 """
153 task = tc.get_task(task['task_id'])
154 if task['status'] == 'SUCCEEDED':
155 logger.debug('Globus transfer {}, from {}{} to {}{} succeeded'.format(
156 task['task_id'], src_ep, src_path, dst_ep, dst_path))
157 else:
158 logger.debug('Globus Transfer task: {}'.format(task))
159 events = tc.task_event_list(task['task_id'], num_results=1, filter='is_error:1')
160 event = events.data[0]
161 raise Exception('Globus transfer {}, from {}{} to {}{} failed due to error: "{}"'.format(
162 task['task_id'], src_ep, src_path, dst_ep, dst_path, event['details']))
163
[end of parsl/data_provider/globus.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/data_provider/globus.py b/parsl/data_provider/globus.py
--- a/parsl/data_provider/globus.py
+++ b/parsl/data_provider/globus.py
@@ -1,6 +1,7 @@
import logging
import json
import globus_sdk
+import os
logger = logging.getLogger(__name__)
@@ -22,8 +23,10 @@
SCOPES = ('openid '
'urn:globus:auth:scope:transfer.api.globus.org:all')
-TOKEN_FILE = 'runinfo/.globus.json'
-
+token_path = os.path.join(os.path.expanduser('~'), '.parsl')
+if not os.path.isdir(token_path):
+ os.mkdir(token_path)
+TOKEN_FILE = os.path.join(token_path, '.globus.json')
get_input = getattr(__builtins__, 'raw_input', input)
|
{"golden_diff": "diff --git a/parsl/data_provider/globus.py b/parsl/data_provider/globus.py\n--- a/parsl/data_provider/globus.py\n+++ b/parsl/data_provider/globus.py\n@@ -1,6 +1,7 @@\n import logging\n import json\n import globus_sdk\n+import os\n \n \n logger = logging.getLogger(__name__)\n@@ -22,8 +23,10 @@\n SCOPES = ('openid '\n 'urn:globus:auth:scope:transfer.api.globus.org:all')\n \n-TOKEN_FILE = 'runinfo/.globus.json'\n-\n+token_path = os.path.join(os.path.expanduser('~'), '.parsl')\n+if not os.path.isdir(token_path):\n+ os.mkdir(token_path)\n+TOKEN_FILE = os.path.join(token_path, '.globus.json')\n \n get_input = getattr(__builtins__, 'raw_input', input)\n", "issue": "Do not hardcode directory to `rundir` for globus tokens\nCurrently the token file is hardcoded to `TOKEN_FILE = 'runinfo/.globus.json'`. This will break if the user uses a non-default rundir. I vote not to couple it to the rundir so that it can be re-used between different scripts with different rundirs without requiring re-authentication, for example, `$HOME/.parsl/globus.json`.\nDo not hardcode directory to `rundir` for globus tokens\nCurrently the token file is hardcoded to `TOKEN_FILE = 'runinfo/.globus.json'`. This will break if the user uses a non-default rundir. I vote not to couple it to the rundir so that it can be re-used between different scripts with different rundirs without requiring re-authentication, for example, `$HOME/.parsl/globus.json`.\n", "before_files": [{"content": "import logging\nimport json\nimport globus_sdk\n\n\nlogger = logging.getLogger(__name__)\n# Add StreamHandler to print error Globus events to stderr\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.WARN)\nformat_string = \"%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s\"\nformatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\n\"\"\"\n'Parsl Application' OAuth2 client registered with Globus Auth\nby [email protected]\n\"\"\"\nCLIENT_ID = '8b8060fd-610e-4a74-885e-1051c71ad473'\nREDIRECT_URI = 'https://auth.globus.org/v2/web/auth-code'\nSCOPES = ('openid '\n 'urn:globus:auth:scope:transfer.api.globus.org:all')\n\nTOKEN_FILE = 'runinfo/.globus.json'\n\n\nget_input = getattr(__builtins__, 'raw_input', input)\n\n\ndef _load_tokens_from_file(filepath):\n with open(filepath, 'r') as f:\n tokens = json.load(f)\n return tokens\n\n\ndef _save_tokens_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)\n\n\ndef _update_tokens_file_on_refresh(token_response):\n _save_tokens_to_file(TOKEN_FILE, token_response.by_resource_server)\n\n\ndef _do_native_app_authentication(client_id, redirect_uri,\n requested_scopes=None):\n\n client = globus_sdk.NativeAppAuthClient(client_id=client_id)\n client.oauth2_start_flow(\n requested_scopes=requested_scopes,\n redirect_uri=redirect_uri,\n refresh_tokens=True)\n\n url = client.oauth2_get_authorize_url()\n print('Please visit the following URL to provide authorization: \\n{}'.format(url))\n auth_code = get_input('Enter the auth code: ').strip()\n token_response = client.oauth2_exchange_code_for_tokens(auth_code)\n return token_response.by_resource_server\n\n\ndef _get_native_app_authorizer(client_id):\n tokens = None\n try:\n tokens = _load_tokens_from_file(TOKEN_FILE)\n except Exception:\n pass\n\n if not tokens:\n tokens = _do_native_app_authentication(\n client_id=client_id,\n redirect_uri=REDIRECT_URI,\n requested_scopes=SCOPES)\n try:\n _save_tokens_to_file(TOKEN_FILE, 
tokens)\n except Exception:\n pass\n\n transfer_tokens = tokens['transfer.api.globus.org']\n\n auth_client = globus_sdk.NativeAppAuthClient(client_id=client_id)\n\n return globus_sdk.RefreshTokenAuthorizer(\n transfer_tokens['refresh_token'],\n auth_client,\n access_token=transfer_tokens['access_token'],\n expires_at=transfer_tokens['expires_at_seconds'],\n on_refresh=_update_tokens_file_on_refresh)\n\n\ndef get_globus():\n Globus.init()\n return Globus()\n\n\nclass Globus(object):\n \"\"\"\n All communication with the Globus Auth and Globus Transfer services is enclosed\n in the Globus class. In particular, the Globus class is reponsible for:\n - managing an OAuth2 authorizer - getting access and refresh tokens,\n refreshing an access token, storing to and retrieving tokens from\n .globus.json file,\n - submitting file transfers,\n - monitoring transfers.\n \"\"\"\n\n authorizer = None\n\n @classmethod\n def init(cls):\n if cls.authorizer:\n return\n cls.authorizer = _get_native_app_authorizer(CLIENT_ID)\n\n @classmethod\n def get_authorizer(cls):\n return cls.authorizer\n\n @classmethod\n def transfer_file(cls, src_ep, dst_ep, src_path, dst_path):\n tc = globus_sdk.TransferClient(authorizer=cls.authorizer)\n td = globus_sdk.TransferData(tc, src_ep, dst_ep)\n td.add_item(src_path, dst_path)\n try:\n task = tc.submit_transfer(td)\n except Exception as e:\n raise Exception('Globus transfer from {}{} to {}{} failed due to error: {}'.format(\n src_ep, src_path, dst_ep, dst_path, e))\n\n last_event_time = None\n \"\"\"\n A Globus transfer job (task) can be in one of the three states: ACTIVE, SUCCEEDED, FAILED.\n Parsl every 20 seconds polls a status of the transfer job (task) from the Globus Transfer service,\n with 60 second timeout limit. If the task is ACTIVE after time runs out 'task_wait' returns False,\n and True otherwise.\n \"\"\"\n while not tc.task_wait(task['task_id'], 60, 15):\n task = tc.get_task(task['task_id'])\n # Get the last error Globus event\n events = tc.task_event_list(task['task_id'], num_results=1, filter='is_error:1')\n event = events.data[0]\n # Print the error event to stderr and Parsl file log if it was not yet printed\n if event['time'] != last_event_time:\n last_event_time = event['time']\n logger.warn('Non-critical Globus Transfer error event for globus://{}{}: \"{}\" at {}. Retrying...'.format(\n src_ep, src_path, event['description'], event['time']))\n logger.debug('Globus Transfer error details: {}'.format(event['details']))\n\n \"\"\"\n The Globus transfer job (task) has been terminated (is not ACTIVE). Check if the transfer\n SUCCEEDED or FAILED.\n \"\"\"\n task = tc.get_task(task['task_id'])\n if task['status'] == 'SUCCEEDED':\n logger.debug('Globus transfer {}, from {}{} to {}{} succeeded'.format(\n task['task_id'], src_ep, src_path, dst_ep, dst_path))\n else:\n logger.debug('Globus Transfer task: {}'.format(task))\n events = tc.task_event_list(task['task_id'], num_results=1, filter='is_error:1')\n event = events.data[0]\n raise Exception('Globus transfer {}, from {}{} to {}{} failed due to error: \"{}\"'.format(\n task['task_id'], src_ep, src_path, dst_ep, dst_path, event['details']))\n", "path": "parsl/data_provider/globus.py"}]}
| 2,489 | 197 |
gh_patches_debug_20015
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-1660
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Perl-5.28.0-GCCcore-7.3.0.eb not building due to perldoc sanity checks failing
```
$ eb --version
This is EasyBuild 3.8.1 (framework: 3.8.1, easyblocks: 3.8.1)
```
From [my build log](https://www.dropbox.com/s/0rmwuqpju9kfqiy/install_foss_2018b_toolchain_job.sge.o3588049.gz?dl=0) for Perl-5.28.0-GCCcore-7.3.0.eb the following looks like a major problem:
```
/usr/local/community/rse/EasyBuild/software/Perl/5.28.0-GCCcore-7.3.0/lib/perl5/5.28.0/xCouldn't copy cpan/podlators/blib/script/pod2man to /usr/local/scripts/pod2man: No such file or directory
```
I.e., it looks like it is trying to install stuff in the wrong place.
@boegel thinks the problem is that the Perl install process finds a `/usr/local/scripts` directory in my environment and incorrectly assumes that's where I'd like it to install scripts.
[More background info](https://openpkg-dev.openpkg.narkive.com/bGejYSaD/bugdb-perl-possible-build-problem-copying-into-usr-local-scripts-pr-133) (from 16 years ago!)
Suggested fix: add the following to the Perl easyconfig (not tested yet):
```python
configopts = "-Dscriptdirexp=%(installdir)s/bin"
```
NB I've not yet had a chance to test this!
</issue>
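The fix that eventually lands in this problem goes into the Perl easyblock rather than the easyconfig: it passes explicit `-Dinstallscript`, `-Dscriptdir` and `-Dscriptdirexp` options to `./Configure` so that scripts always end up under the installation's `bin` directory instead of a guessed path like `/usr/local/scripts`. The snippet below is a small standalone sketch of how those options fit together; the helper function is illustrative and not part of the easyblock.

```python
import os


def perl_configure_command(installdir, extra_opts=None):
    """Build a Perl ./Configure command line that pins all script paths.

    Pointing installscript/scriptdir/scriptdirexp at <installdir>/bin stops
    Configure from guessing a location that merely happens to exist on the
    build host.
    """
    bindir = os.path.join(installdir, "bin")
    opts = [
        f"-Dprefix={installdir}",
        f"-Dinstallscript={bindir}",
        f"-Dscriptdir={bindir}",
        f"-Dscriptdirexp={bindir}",
    ] + (extra_opts or [])
    return "./Configure -de " + " ".join(opts)


if __name__ == "__main__":
    print(perl_configure_command("/opt/easybuild/software/Perl/5.28.0"))
```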
<code>
[start of easybuild/easyblocks/p/perl.py]
1 ##
2 # Copyright 2009-2019 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for Perl, implemented as an easyblock
27
28 @author: Jens Timmerman (Ghent University)
29 @author: Kenneth Hoste (Ghent University)
30 """
31 import os
32
33 from easybuild.easyblocks.generic.configuremake import ConfigureMake
34 from easybuild.framework.easyconfig import CUSTOM
35 from easybuild.tools.run import run_cmd
36
37 # perldoc -lm seems to be the safest way to test if a module is available, based on exit code
38 EXTS_FILTER_PERL_MODULES = ("perldoc -lm %(ext_name)s ", "")
39
40
41 class EB_Perl(ConfigureMake):
42 """Support for building and installing Perl."""
43
44 @staticmethod
45 def extra_options():
46 """Add extra config options specific to Perl."""
47 extra_vars = {
48 'use_perl_threads': [True, "Enable use of internal Perl threads via -Dusethreads configure option", CUSTOM],
49 }
50 return ConfigureMake.extra_options(extra_vars)
51
52 def configure_step(self):
53 """
54 Configure Perl build: run ./Configure instead of ./configure with some different options
55 """
56 configopts = [
57 self.cfg['configopts'],
58 '-Dcc="{0}"'.format(os.getenv('CC')),
59 '-Dccflags="{0}"'.format(os.getenv('CFLAGS')),
60 '-Dinc_version_list=none',
61 ]
62 if self.cfg['use_perl_threads']:
63 configopts.append('-Dusethreads')
64
65 cmd = './Configure -de %s -Dprefix="%s"' % (' '.join(configopts), self.installdir)
66 run_cmd(cmd, log_all=True, simple=True)
67
68 def test_step(self):
69 """Test Perl build via 'make test'."""
70 # allow escaping with runtest = False
71 if self.cfg['runtest'] is None or self.cfg['runtest']:
72 if isinstance(self.cfg['runtest'], basestring):
73 cmd = "make %s" % self.cfg['runtest']
74 else:
75 cmd = "make test"
76
77 # specify locale to be used, to avoid that a handful of tests fail
78 cmd = "export LC_ALL=C && %s" % cmd
79
80 run_cmd(cmd, log_all=False, log_ok=False, simple=False)
81
82 def prepare_for_extensions(self):
83 """
84 Set default class and filter for Perl modules
85 """
86 # build and install additional modules with PerlModule easyblock
87 self.cfg['exts_defaultclass'] = "PerlModule"
88 self.cfg['exts_filter'] = EXTS_FILTER_PERL_MODULES
89
90 def sanity_check_step(self):
91 """Custom sanity check for Perl."""
92 majver = self.version.split('.')[0]
93 custom_paths = {
94 'files': [os.path.join('bin', x) for x in ['perl', 'perldoc']],
95 'dirs': ['lib/perl%s/%s' % (majver, self.version), 'man']
96 }
97 super(EB_Perl, self).sanity_check_step(custom_paths=custom_paths)
98
99
100 def get_major_perl_version():
101 """"
102 Returns the major verson of the perl binary in the current path
103 """
104 cmd = "perl -MConfig -e 'print $Config::Config{PERL_API_REVISION}'"
105 (perlmajver, _) = run_cmd(cmd, log_all=True, log_output=True, simple=False)
106 return perlmajver
107
108
109 def get_site_suffix(tag):
110 """
111 Returns the suffix for site* (e.g. sitearch, sitelib)
112 this will look something like /lib/perl5/site_perl/5.16.3/x86_64-linux-thread-multi
113 so, e.g. sitearch without site prefix
114
115 @tag: site tag to use, e.g. 'sitearch', 'sitelib'
116 """
117 perl_cmd = 'my $a = $Config::Config{"%s"}; $a =~ s/($Config::Config{"siteprefix"})//; print $a' % tag
118 cmd = "perl -MConfig -e '%s'" % perl_cmd
119 (sitesuffix, _) = run_cmd(cmd, log_all=True, log_output=True, simple=False)
120 # obtained value usually contains leading '/', so strip it off
121 return sitesuffix.lstrip(os.path.sep)
122
[end of easybuild/easyblocks/p/perl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/easybuild/easyblocks/p/perl.py b/easybuild/easyblocks/p/perl.py
--- a/easybuild/easyblocks/p/perl.py
+++ b/easybuild/easyblocks/p/perl.py
@@ -58,11 +58,19 @@
'-Dcc="{0}"'.format(os.getenv('CC')),
'-Dccflags="{0}"'.format(os.getenv('CFLAGS')),
'-Dinc_version_list=none',
+ '-Dprefix=%(installdir)s',
+ # guarantee that scripts are installed in /bin in the installation directory (and not in a guessed path)
+ # see https://github.com/easybuilders/easybuild-easyblocks/issues/1659
+ '-Dinstallscript=%(installdir)s/bin',
+ '-Dscriptdir=%(installdir)s/bin',
+ '-Dscriptdirexp=%(installdir)s/bin',
]
if self.cfg['use_perl_threads']:
configopts.append('-Dusethreads')
- cmd = './Configure -de %s -Dprefix="%s"' % (' '.join(configopts), self.installdir)
+ configopts = (' '.join(configopts)) % {'installdir': self.installdir}
+
+ cmd = './Configure -de %s' % configopts
run_cmd(cmd, log_all=True, simple=True)
def test_step(self):
|
{"golden_diff": "diff --git a/easybuild/easyblocks/p/perl.py b/easybuild/easyblocks/p/perl.py\n--- a/easybuild/easyblocks/p/perl.py\n+++ b/easybuild/easyblocks/p/perl.py\n@@ -58,11 +58,19 @@\n '-Dcc=\"{0}\"'.format(os.getenv('CC')),\n '-Dccflags=\"{0}\"'.format(os.getenv('CFLAGS')),\n '-Dinc_version_list=none',\n+ '-Dprefix=%(installdir)s',\n+ # guarantee that scripts are installed in /bin in the installation directory (and not in a guessed path)\n+ # see https://github.com/easybuilders/easybuild-easyblocks/issues/1659\n+ '-Dinstallscript=%(installdir)s/bin',\n+ '-Dscriptdir=%(installdir)s/bin',\n+ '-Dscriptdirexp=%(installdir)s/bin',\n ]\n if self.cfg['use_perl_threads']:\n configopts.append('-Dusethreads')\n \n- cmd = './Configure -de %s -Dprefix=\"%s\"' % (' '.join(configopts), self.installdir)\n+ configopts = (' '.join(configopts)) % {'installdir': self.installdir}\n+\n+ cmd = './Configure -de %s' % configopts\n run_cmd(cmd, log_all=True, simple=True)\n \n def test_step(self):\n", "issue": "Perl-5.28.0-GCCcore-7.3.0.eb not building due to perldoc sanity checks failing\n```\r\n$ eb --version\r\nThis is EasyBuild 3.8.1 (framework: 3.8.1, easyblocks: 3.8.1)\r\n```\r\n\r\nFrom [my build log](https://www.dropbox.com/s/0rmwuqpju9kfqiy/install_foss_2018b_toolchain_job.sge.o3588049.gz?dl=0) for Perl-5.28.0-GCCcore-7.3.0.eb the following looks like a major problem:\r\n```\r\n /usr/local/community/rse/EasyBuild/software/Perl/5.28.0-GCCcore-7.3.0/lib/perl5/5.28.0/xCouldn't copy cpan/podlators/blib/script/pod2man to /usr/local/scripts/pod2man: No such file or directory\r\n```\r\n\r\nI.e., it looks like it is trying to install stuff in the wrong place.\r\n\r\n@boegel thinks the problem is that the Perl install process finds a `/usr/local/scripts` directory in my environment and incorrectly assumes that's where I'd like it to install scripts. \r\n\r\n[More background info](https://openpkg-dev.openpkg.narkive.com/bGejYSaD/bugdb-perl-possible-build-problem-copying-into-usr-local-scripts-pr-133) (from 16 years ago!)\r\n\r\nSuggested fix: add the following to the Perl easyconfig (not tested yet):\r\n```python\r\nconfigopts = \"-Dscriptdirexp=%(installdir)s/bin\"\r\n```\r\n\r\nNB I've not yet had chance to test this! \n", "before_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for Perl, implemented as an easyblock\n\n@author: Jens Timmerman (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.run import run_cmd\n\n# perldoc -lm seems to be the safest way to test if a module is available, based on exit code\nEXTS_FILTER_PERL_MODULES = (\"perldoc -lm %(ext_name)s \", \"\")\n\n\nclass EB_Perl(ConfigureMake):\n \"\"\"Support for building and installing Perl.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Add extra config options specific to Perl.\"\"\"\n extra_vars = {\n 'use_perl_threads': [True, \"Enable use of internal Perl threads via -Dusethreads configure option\", CUSTOM],\n }\n return ConfigureMake.extra_options(extra_vars)\n\n def configure_step(self):\n \"\"\"\n Configure Perl build: run ./Configure instead of ./configure with some different options\n \"\"\"\n configopts = [\n self.cfg['configopts'],\n '-Dcc=\"{0}\"'.format(os.getenv('CC')),\n '-Dccflags=\"{0}\"'.format(os.getenv('CFLAGS')),\n '-Dinc_version_list=none',\n ]\n if self.cfg['use_perl_threads']:\n configopts.append('-Dusethreads')\n\n cmd = './Configure -de %s -Dprefix=\"%s\"' % (' '.join(configopts), self.installdir)\n run_cmd(cmd, log_all=True, simple=True)\n\n def test_step(self):\n \"\"\"Test Perl build via 'make test'.\"\"\"\n # allow escaping with runtest = False\n if self.cfg['runtest'] is None or self.cfg['runtest']:\n if isinstance(self.cfg['runtest'], basestring):\n cmd = \"make %s\" % self.cfg['runtest']\n else:\n cmd = \"make test\"\n\n # specify locale to be used, to avoid that a handful of tests fail\n cmd = \"export LC_ALL=C && %s\" % cmd\n\n run_cmd(cmd, log_all=False, log_ok=False, simple=False)\n\n def prepare_for_extensions(self):\n \"\"\"\n Set default class and filter for Perl modules\n \"\"\"\n # build and install additional modules with PerlModule easyblock\n self.cfg['exts_defaultclass'] = \"PerlModule\"\n self.cfg['exts_filter'] = EXTS_FILTER_PERL_MODULES\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Perl.\"\"\"\n majver = self.version.split('.')[0]\n custom_paths = {\n 'files': [os.path.join('bin', x) for x in ['perl', 'perldoc']],\n 'dirs': ['lib/perl%s/%s' % (majver, self.version), 'man']\n }\n super(EB_Perl, self).sanity_check_step(custom_paths=custom_paths)\n\n\ndef get_major_perl_version():\n \"\"\"\"\n Returns the major verson of the perl binary in the current path\n \"\"\"\n cmd = \"perl -MConfig -e 'print $Config::Config{PERL_API_REVISION}'\"\n (perlmajver, _) = run_cmd(cmd, log_all=True, log_output=True, simple=False)\n return perlmajver\n\n\ndef get_site_suffix(tag):\n \"\"\"\n Returns the suffix for site* (e.g. sitearch, sitelib)\n this will look something like /lib/perl5/site_perl/5.16.3/x86_64-linux-thread-multi\n so, e.g. sitearch without site prefix\n\n @tag: site tag to use, e.g. 'sitearch', 'sitelib'\n \"\"\"\n perl_cmd = 'my $a = $Config::Config{\"%s\"}; $a =~ s/($Config::Config{\"siteprefix\"})//; print $a' % tag\n cmd = \"perl -MConfig -e '%s'\" % perl_cmd\n (sitesuffix, _) = run_cmd(cmd, log_all=True, log_output=True, simple=False)\n # obtained value usually contains leading '/', so strip it off\n return sitesuffix.lstrip(os.path.sep)\n", "path": "easybuild/easyblocks/p/perl.py"}]}
| 2,370 | 318 |
gh_patches_debug_23010
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-67
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add django-debug-toolbar for debugging
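For reference, the usual wiring for django-debug-toolbar looks roughly like the sketch below (this follows the toolbar's standard setup pattern rather than this project's actual settings, so treat the exact placement as an assumption):
```python
# settings.py -- sketch of the usual django-debug-toolbar wiring
INSTALLED_APPS = [
    # ... existing apps ...
    'debug_toolbar',
]

MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',  # keep it early in the list
    # ... existing middleware ...
]

INTERNAL_IPS = ['127.0.0.1']  # the toolbar only renders for these client IPs

# urls.py -- only expose the toolbar's URLs when DEBUG is on
# if settings.DEBUG:
#     import debug_toolbar
#     urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]
```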
</issue>
<code>
[start of csunplugged/config/settings.py]
1 """
2 Django settings for csunplugged project.
3
4 Generated by 'django-admin startproject' using Django 1.10.3.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.10/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.10/ref/settings/
11 """
12
13 import os
14 from config.settings_secret import *
15
16 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
17 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
18
19 # nasty hard coding
20 SETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))
21
22
23 # Quick-start development settings - unsuitable for production
24 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
25
26 # SECURITY WARNING: keep the secret key used in production secret!
27 SECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'
28
29 # SECURITY WARNING: don't run with debug turned on in production!
30 DEBUG = True
31
32 ALLOWED_HOSTS = []
33
34
35 # Application definition
36
37 INSTALLED_APPS = [
38 'general.apps.GeneralConfig',
39 'topics.apps.TopicsConfig',
40 'resources.apps.ResourcesConfig',
41 'django.contrib.admin',
42 'django.contrib.auth',
43 'django.contrib.contenttypes',
44 'django.contrib.sessions',
45 'django.contrib.messages',
46 'django.contrib.staticfiles',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.locale.LocaleMiddleware',
53 'django.middleware.common.CommonMiddleware',
54 'django.middleware.csrf.CsrfViewMiddleware',
55 'django.contrib.auth.middleware.AuthenticationMiddleware',
56 'django.contrib.messages.middleware.MessageMiddleware',
57 'django.middleware.clickjacking.XFrameOptionsMiddleware',
58 ]
59
60 ROOT_URLCONF = 'config.urls'
61
62 TEMPLATES = [
63 {
64 'BACKEND': 'django.template.backends.django.DjangoTemplates',
65 'DIRS': [
66 os.path.join(SETTINGS_PATH, 'templates'),
67 os.path.join(SETTINGS_PATH, 'resources/content/')
68 ],
69 'APP_DIRS': True,
70 'OPTIONS': {
71 'context_processors': [
72 'django.template.context_processors.debug',
73 'django.template.context_processors.request',
74 'django.contrib.auth.context_processors.auth',
75 'django.contrib.messages.context_processors.messages',
76 ],
77 },
78 },
79 ]
80
81 WSGI_APPLICATION = 'config.wsgi.application'
82
83
84 # Database
85 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
86 # Database values are stored in `settings_secret.py`
87 # A template of this file is available as `settings_secret_template.py`
88
89
90 # Password validation
91 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
92
93 AUTH_PASSWORD_VALIDATORS = [
94 {
95 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
96 },
97 {
98 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
99 },
100 {
101 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
105 },
106 ]
107
108
109 # Internationalization
110 # https://docs.djangoproject.com/en/1.10/topics/i18n/
111
112 LANGUAGE_CODE = 'en-us'
113
114 TIME_ZONE = 'UTC'
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122 LOCALE_PATHS = ['locale']
123
124 # Static files (CSS, JavaScript, Images)
125 # https://docs.djangoproject.com/en/1.10/howto/static-files/
126
127 STATIC_URL = '/static/'
128 STATICFILES_DIRS = (
129 os.path.join(BASE_DIR, 'build'),
130 )
131
[end of csunplugged/config/settings.py]
[start of csunplugged/config/urls.py]
1 """csunplugged URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/1.10/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.conf.urls import url, include
14 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
15 """
16 from django.conf.urls import include, url
17 from django.conf.urls.i18n import i18n_patterns
18 from django.contrib import admin
19 from django.conf import settings
20 from django.conf.urls.static import static
21
22 urlpatterns = i18n_patterns(
23 url(r'', include('general.urls', namespace='general')),
24 url(r'^topics/', include('topics.urls', namespace='topics')),
25 url(r'^resources/', include('resources.urls', namespace='resources')),
26 url(r'^admin/', include(admin.site.urls)),
27 )
28 # ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)
29
[end of csunplugged/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py
--- a/csunplugged/config/settings.py
+++ b/csunplugged/config/settings.py
@@ -44,9 +44,11 @@
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
+ 'debug_toolbar',
]
MIDDLEWARE = [
+ 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
@@ -128,3 +130,7 @@
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'build'),
)
+
+# Internal IPs for Django Debug Toolbar
+# https://docs.djangoproject.com/en/1.10/ref/settings/#internal-ips
+INTERNAL_IPS = ['127.0.0.1']
diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py
--- a/csunplugged/config/urls.py
+++ b/csunplugged/config/urls.py
@@ -26,3 +26,9 @@
url(r'^admin/', include(admin.site.urls)),
)
# ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)
+
+if settings.DEBUG:
+ import debug_toolbar
+ urlpatterns += [
+ url(r'^__debug__/', include(debug_toolbar.urls)),
+ ]
|
{"golden_diff": "diff --git a/csunplugged/config/settings.py b/csunplugged/config/settings.py\n--- a/csunplugged/config/settings.py\n+++ b/csunplugged/config/settings.py\n@@ -44,9 +44,11 @@\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n+ 'debug_toolbar',\n ]\n \n MIDDLEWARE = [\n+ 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n@@ -128,3 +130,7 @@\n STATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'build'),\n )\n+\n+# Internal IPs for Django Debug Toolbar\n+# https://docs.djangoproject.com/en/1.10/ref/settings/#internal-ips\n+INTERNAL_IPS = ['127.0.0.1']\ndiff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py\n--- a/csunplugged/config/urls.py\n+++ b/csunplugged/config/urls.py\n@@ -26,3 +26,9 @@\n url(r'^admin/', include(admin.site.urls)),\n )\n # ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)\n+\n+if settings.DEBUG:\n+ import debug_toolbar\n+ urlpatterns += [\n+ url(r'^__debug__/', include(debug_toolbar.urls)),\n+ ]\n", "issue": "Add django-debug-toolbar for debugging\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for csunplugged project.\n\nGenerated by 'django-admin startproject' using Django 1.10.3.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport os\nfrom config.settings_secret import *\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# nasty hard coding\nSETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'general.apps.GeneralConfig',\n 'topics.apps.TopicsConfig',\n 'resources.apps.ResourcesConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(SETTINGS_PATH, 'templates'),\n os.path.join(SETTINGS_PATH, 'resources/content/')\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n 
},\n]\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n# Database values are stored in `settings_secret.py`\n# A template of this file is available as `settings_secret_template.py`\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = ['locale']\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'build'),\n )\n", "path": "csunplugged/config/settings.py"}, {"content": "\"\"\"csunplugged URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = i18n_patterns(\n url(r'', include('general.urls', namespace='general')),\n url(r'^topics/', include('topics.urls', namespace='topics')),\n url(r'^resources/', include('resources.urls', namespace='resources')),\n url(r'^admin/', include(admin.site.urls)),\n)\n# ] + static(settings.STATIC_URL, documnet_root=settings.STATIC_ROOT)\n", "path": "csunplugged/config/urls.py"}]}
| 2,027 | 315 |
gh_patches_debug_33414
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-6733
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Caffe Nero GB spider using outdated JSON file
The caffe_nero_gb.py spider gets its data from a JSON file that the Store Finder page at https://caffenero.com/uk/stores/ uses to display its map. However, it looks like the URL of that JSON file has changed, and ATP is still referencing the old (and no longer updated) one.
The ATP code currently has
`allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]`
`start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]`
But the URL referenced by https://caffenero.com/uk/stores/ is now
https://caffenerowebsite.blob.core.windows.net/production/data/stores/stores-gb.json
I think the format of the JSON file has remained the same, so it should just be a matter of swapping the URLs over.
To help issues like this be picked up sooner in the future, I wonder if there's a way of checking that the JSON URL used is still included in the https://caffenero.com/uk/stores/ page, and producing a warning to anyone running ATP if not?
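As a rough illustration of that idea, a spider could discover the JSON URL from the stores page at crawl time instead of hard-coding it; the sketch below assumes the page hands the GeoJSON URL to a `loadGeoJson(...)` call, which would need checking against the live page:
```python
import re

import scrapy


class CaffeNeroStoresSketch(scrapy.Spider):
    """Sketch only: find the stores JSON URL on the store-finder page, then crawl it."""

    name = "caffe_nero_gb_sketch"
    start_urls = ["https://caffenero.com/uk/stores/"]

    def parse(self, response):
        match = re.search(r"loadGeoJson\(\s*'(https://[^']+)'", response.text)
        if not match:
            self.logger.warning("Stores JSON URL not found on %s", response.url)
            return
        yield scrapy.Request(match.group(1), callback=self.parse_stores)

    def parse_stores(self, response):
        for feature in response.json()["features"]:
            yield feature["properties"]
```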
</issue>
<code>
[start of locations/spiders/caffe_nero_gb.py]
1 from scrapy import Spider
2 from scrapy.http import JsonRequest
3
4 from locations.categories import Categories, Extras, apply_category, apply_yes_no
5 from locations.dict_parser import DictParser
6 from locations.hours import OpeningHours
7
8
9 class CaffeNeroGBSpider(Spider):
10 name = "caffe_nero_gb"
11 item_attributes = {"brand": "Caffe Nero", "brand_wikidata": "Q675808"}
12 allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]
13 start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]
14
15 def start_requests(self):
16 for url in self.start_urls:
17 yield JsonRequest(url=url)
18
19 def parse(self, response):
20 for location in response.json()["features"]:
21 if (
22 not location["properties"]["status"]["open"]
23 or location["properties"]["status"]["opening_soon"]
24 or location["properties"]["status"]["temp_closed"]
25 ):
26 continue
27
28 item = DictParser.parse(location["properties"])
29 item["geometry"] = location["geometry"]
30 if location["properties"]["status"]["express"]:
31 item["brand"] = "Nero Express"
32
33 item["opening_hours"] = OpeningHours()
34 for day_name, day_hours in location["properties"]["hoursRegular"].items():
35 if day_hours["open"] == "closed" or day_hours["close"] == "closed":
36 continue
37 if day_name == "holiday":
38 continue
39 item["opening_hours"].add_range(day_name.title(), day_hours["open"], day_hours["close"])
40
41 apply_yes_no(Extras.TAKEAWAY, item, location["properties"]["status"]["takeaway"], False)
42 apply_yes_no(Extras.DELIVERY, item, location["properties"]["status"]["delivery"], False)
43 apply_yes_no(Extras.WIFI, item, location["properties"]["amenities"]["wifi"], False)
44 apply_yes_no(Extras.TOILETS, item, location["properties"]["amenities"]["toilet"], False)
45 apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location["properties"]["amenities"]["baby_change"], False)
46 apply_yes_no(Extras.SMOKING_AREA, item, location["properties"]["amenities"]["smoking_area"], False)
47 apply_yes_no(Extras.AIR_CONDITIONING, item, location["properties"]["amenities"]["air_conditioned"], False)
48 apply_yes_no(Extras.WHEELCHAIR, item, location["properties"]["amenities"].get("disabled_access"), False)
49 apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location["properties"]["amenities"]["disabled_toilet"], False)
50 apply_yes_no(Extras.OUTDOOR_SEATING, item, location["properties"]["amenities"]["outside_seating"], False)
51 apply_category(Categories.COFFEE_SHOP, item)
52
53 item["website"] = f'https://caffenero.com/uk/store/{location["properties"]["slug"]}/'
54
55 yield item
56
[end of locations/spiders/caffe_nero_gb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/caffe_nero_gb.py b/locations/spiders/caffe_nero_gb.py
--- a/locations/spiders/caffe_nero_gb.py
+++ b/locations/spiders/caffe_nero_gb.py
@@ -1,5 +1,8 @@
+import re
+from typing import Any
+
from scrapy import Spider
-from scrapy.http import JsonRequest
+from scrapy.http import JsonRequest, Response
from locations.categories import Categories, Extras, apply_category, apply_yes_no
from locations.dict_parser import DictParser
@@ -9,14 +12,15 @@
class CaffeNeroGBSpider(Spider):
name = "caffe_nero_gb"
item_attributes = {"brand": "Caffe Nero", "brand_wikidata": "Q675808"}
- allowed_domains = ["caffenero-webassets-production.s3.eu-west-2.amazonaws.com"]
- start_urls = ["https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json"]
+ allowed_domains = ["caffenero.com", "caffenerowebsite.blob.core.windows.net"]
+ start_urls = ["https://caffenero.com/uk/stores/"]
- def start_requests(self):
- for url in self.start_urls:
- yield JsonRequest(url=url)
+ def parse(self, response: Response, **kwargs: Any) -> Any:
+ yield JsonRequest(
+ re.search(r"loadGeoJson\(\n\s+'(https://.+)', {", response.text).group(1), callback=self.parse_geojson
+ )
- def parse(self, response):
+ def parse_geojson(self, response: Response, **kwargs: Any) -> Any:
for location in response.json()["features"]:
if (
not location["properties"]["status"]["open"]
@@ -30,6 +34,8 @@
if location["properties"]["status"]["express"]:
item["brand"] = "Nero Express"
+ item["branch"] = item.pop("name")
+
item["opening_hours"] = OpeningHours()
for day_name, day_hours in location["properties"]["hoursRegular"].items():
if day_hours["open"] == "closed" or day_hours["close"] == "closed":
|
{"golden_diff": "diff --git a/locations/spiders/caffe_nero_gb.py b/locations/spiders/caffe_nero_gb.py\n--- a/locations/spiders/caffe_nero_gb.py\n+++ b/locations/spiders/caffe_nero_gb.py\n@@ -1,5 +1,8 @@\n+import re\n+from typing import Any\n+\n from scrapy import Spider\n-from scrapy.http import JsonRequest\n+from scrapy.http import JsonRequest, Response\n \n from locations.categories import Categories, Extras, apply_category, apply_yes_no\n from locations.dict_parser import DictParser\n@@ -9,14 +12,15 @@\n class CaffeNeroGBSpider(Spider):\n name = \"caffe_nero_gb\"\n item_attributes = {\"brand\": \"Caffe Nero\", \"brand_wikidata\": \"Q675808\"}\n- allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]\n- start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]\n+ allowed_domains = [\"caffenero.com\", \"caffenerowebsite.blob.core.windows.net\"]\n+ start_urls = [\"https://caffenero.com/uk/stores/\"]\n \n- def start_requests(self):\n- for url in self.start_urls:\n- yield JsonRequest(url=url)\n+ def parse(self, response: Response, **kwargs: Any) -> Any:\n+ yield JsonRequest(\n+ re.search(r\"loadGeoJson\\(\\n\\s+'(https://.+)', {\", response.text).group(1), callback=self.parse_geojson\n+ )\n \n- def parse(self, response):\n+ def parse_geojson(self, response: Response, **kwargs: Any) -> Any:\n for location in response.json()[\"features\"]:\n if (\n not location[\"properties\"][\"status\"][\"open\"]\n@@ -30,6 +34,8 @@\n if location[\"properties\"][\"status\"][\"express\"]:\n item[\"brand\"] = \"Nero Express\"\n \n+ item[\"branch\"] = item.pop(\"name\")\n+\n item[\"opening_hours\"] = OpeningHours()\n for day_name, day_hours in location[\"properties\"][\"hoursRegular\"].items():\n if day_hours[\"open\"] == \"closed\" or day_hours[\"close\"] == \"closed\":\n", "issue": "Caffe Nero GB spider using outdated JSON file\nThe caffe_nero_gb.py spider gets its data from JSON file that the Store Finder page at https://caffenero.com/uk/stores/ uses to display its map. 
However, it looks like that URL of that JSON file has changed, and ATP is still referencing the old (and no longer updated one).\r\n\r\nThe ATP code currently has\r\n`allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]`\r\n`start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]`\r\nBut the URL referenced by https://caffenero.com/uk/stores/ is now\r\nhttps://caffenerowebsite.blob.core.windows.net/production/data/stores/stores-gb.json\r\n\r\nI think the format of the JSON file has remained the same, so it should just be a matter of swapping the URLs over.\r\n\r\nTo help issues like this be picked up sooner in the future, I wonder if there's a way of checking that the JSON URL used is still included in the https://caffenero.com/uk/stores/ page, and producing a warning to anyone running ATP if not?\n", "before_files": [{"content": "from scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.categories import Categories, Extras, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass CaffeNeroGBSpider(Spider):\n name = \"caffe_nero_gb\"\n item_attributes = {\"brand\": \"Caffe Nero\", \"brand_wikidata\": \"Q675808\"}\n allowed_domains = [\"caffenero-webassets-production.s3.eu-west-2.amazonaws.com\"]\n start_urls = [\"https://caffenero-webassets-production.s3.eu-west-2.amazonaws.com/stores/stores_gb.json\"]\n\n def start_requests(self):\n for url in self.start_urls:\n yield JsonRequest(url=url)\n\n def parse(self, response):\n for location in response.json()[\"features\"]:\n if (\n not location[\"properties\"][\"status\"][\"open\"]\n or location[\"properties\"][\"status\"][\"opening_soon\"]\n or location[\"properties\"][\"status\"][\"temp_closed\"]\n ):\n continue\n\n item = DictParser.parse(location[\"properties\"])\n item[\"geometry\"] = location[\"geometry\"]\n if location[\"properties\"][\"status\"][\"express\"]:\n item[\"brand\"] = \"Nero Express\"\n\n item[\"opening_hours\"] = OpeningHours()\n for day_name, day_hours in location[\"properties\"][\"hoursRegular\"].items():\n if day_hours[\"open\"] == \"closed\" or day_hours[\"close\"] == \"closed\":\n continue\n if day_name == \"holiday\":\n continue\n item[\"opening_hours\"].add_range(day_name.title(), day_hours[\"open\"], day_hours[\"close\"])\n\n apply_yes_no(Extras.TAKEAWAY, item, location[\"properties\"][\"status\"][\"takeaway\"], False)\n apply_yes_no(Extras.DELIVERY, item, location[\"properties\"][\"status\"][\"delivery\"], False)\n apply_yes_no(Extras.WIFI, item, location[\"properties\"][\"amenities\"][\"wifi\"], False)\n apply_yes_no(Extras.TOILETS, item, location[\"properties\"][\"amenities\"][\"toilet\"], False)\n apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location[\"properties\"][\"amenities\"][\"baby_change\"], False)\n apply_yes_no(Extras.SMOKING_AREA, item, location[\"properties\"][\"amenities\"][\"smoking_area\"], False)\n apply_yes_no(Extras.AIR_CONDITIONING, item, location[\"properties\"][\"amenities\"][\"air_conditioned\"], False)\n apply_yes_no(Extras.WHEELCHAIR, item, location[\"properties\"][\"amenities\"].get(\"disabled_access\"), False)\n apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location[\"properties\"][\"amenities\"][\"disabled_toilet\"], False)\n apply_yes_no(Extras.OUTDOOR_SEATING, item, location[\"properties\"][\"amenities\"][\"outside_seating\"], False)\n apply_category(Categories.COFFEE_SHOP, item)\n\n item[\"website\"] = 
f'https://caffenero.com/uk/store/{location[\"properties\"][\"slug\"]}/'\n\n yield item\n", "path": "locations/spiders/caffe_nero_gb.py"}]}
| 1,544 | 495 |
gh_patches_debug_24105
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-372
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The mean value is not shown in the regression systematic error plot
I would expect that near the plot (or when I hover over the mean line in the plot), I would see the mean error value.

To reproduce:
https://www.kaggle.com/itay94/notebookf8c78e84d7
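For what it's worth, Plotly's box trace can already mark the mean; a minimal standalone sketch (toy data, not the check's actual code) looks like this:
```python
import plotly.graph_objects as go

errors = [-0.4, -0.1, 0.0, 0.2, 0.3, 0.9]  # toy stand-in for y_test - y_pred

fig = go.Figure(go.Box(
    x=errors,
    name='Model prediction error',
    boxmean=True,  # draws a dashed line at the mean inside the box
))
fig.update_layout(title_text='Box plot of the model prediction error',
                  width=800, height=500)
# fig.show()  # hovering the box then lists the mean alongside the quartiles
```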
</issue>
<code>
[start of deepchecks/checks/performance/regression_systematic_error.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The RegressionSystematicError check module."""
12 import plotly.graph_objects as go
13 from sklearn.base import BaseEstimator
14 from sklearn.metrics import mean_squared_error
15
16 from deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult
17 from deepchecks.utils.metrics import ModelType, task_type_validation
18 from deepchecks.utils.strings import format_number
19
20
21 __all__ = ['RegressionSystematicError']
22
23
24 class RegressionSystematicError(SingleDatasetBaseCheck):
25 """Check the regression systematic error."""
26
27 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:
28 """Run check.
29
30 Arguments:
31 dataset (Dataset): A dataset object.
32 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
33 Returns:
34 CheckResult:
35 - value is a dict with rmse and mean prediction error.
36 - display is box plot of the prediction errorד.
37 Raises:
38 DeepchecksValueError: If the object is not a Dataset instance with a label
39 """
40 return self._regression_error_distribution(dataset, model)
41
42 def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator):
43 Dataset.validate_dataset(dataset)
44 dataset.validate_label()
45 task_type_validation(model, dataset, [ModelType.REGRESSION])
46
47 y_test = dataset.label_col
48 y_pred = model.predict(dataset.features_columns)
49
50 rmse = mean_squared_error(dataset.label_col, y_pred, squared=False)
51 diff = y_test - y_pred
52 diff_mean = diff.mean()
53
54 display = [
55 'Non-zero mean of the error distribution indicated the presents '
56 'of systematic error in model predictions',
57 go.Figure()
58 .add_trace(go.Box(
59 x=diff.values,
60 name='Model Prediction Error',
61 boxpoints='suspectedoutliers',
62 marker=dict(outliercolor='red'),
63 hoverinfo='x',
64 hoveron='points'))
65 .update_layout(
66 title_text='Box plot of the model prediction error',
67 width=800, height=500)
68 .add_vline(
69 x=diff_mean + 0.01,
70 line_dash='dash',
71 annotation_text='Mean error')
72 ]
73
74 return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)
75
76 def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01):
77 """Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE).
78
79 Args:
80 max_ratio (float): Maximum ratio
81 """
82 def max_bias_condition(result: float) -> ConditionResult:
83 rmse = result['rmse']
84 mean_error = result['mean_error']
85 if abs(mean_error) > max_ratio * rmse:
86 return ConditionResult(False,
87 f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}')
88 else:
89 return ConditionResult(True)
90
91 return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}',
92 max_bias_condition)
93
[end of deepchecks/checks/performance/regression_systematic_error.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py
--- a/deepchecks/checks/performance/regression_systematic_error.py
+++ b/deepchecks/checks/performance/regression_systematic_error.py
@@ -51,24 +51,19 @@
diff = y_test - y_pred
diff_mean = diff.mean()
+ fig = go.Figure()
+ fig.add_trace(go.Box(
+ x=diff,
+ name='Model prediction error',
+ boxmean=True # represent mean
+ )).update_layout(
+ title_text='Box plot of the model prediction error',
+ width=800, height=500)
+
display = [
'Non-zero mean of the error distribution indicated the presents '
'of systematic error in model predictions',
- go.Figure()
- .add_trace(go.Box(
- x=diff.values,
- name='Model Prediction Error',
- boxpoints='suspectedoutliers',
- marker=dict(outliercolor='red'),
- hoverinfo='x',
- hoveron='points'))
- .update_layout(
- title_text='Box plot of the model prediction error',
- width=800, height=500)
- .add_vline(
- x=diff_mean + 0.01,
- line_dash='dash',
- annotation_text='Mean error')
+ fig
]
return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)
|
{"golden_diff": "diff --git a/deepchecks/checks/performance/regression_systematic_error.py b/deepchecks/checks/performance/regression_systematic_error.py\n--- a/deepchecks/checks/performance/regression_systematic_error.py\n+++ b/deepchecks/checks/performance/regression_systematic_error.py\n@@ -51,24 +51,19 @@\n diff = y_test - y_pred\n diff_mean = diff.mean()\n \n+ fig = go.Figure()\n+ fig.add_trace(go.Box(\n+ x=diff,\n+ name='Model prediction error',\n+ boxmean=True # represent mean\n+ )).update_layout(\n+ title_text='Box plot of the model prediction error',\n+ width=800, height=500)\n+\n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n 'of systematic error in model predictions',\n- go.Figure()\n- .add_trace(go.Box(\n- x=diff.values,\n- name='Model Prediction Error',\n- boxpoints='suspectedoutliers',\n- marker=dict(outliercolor='red'),\n- hoverinfo='x',\n- hoveron='points'))\n- .update_layout(\n- title_text='Box plot of the model prediction error',\n- width=800, height=500)\n- .add_vline(\n- x=diff_mean + 0.01,\n- line_dash='dash',\n- annotation_text='Mean error')\n+ fig\n ]\n \n return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)\n", "issue": "The mean value is not shown in the regression systematic error plot\nI would expect that near the plot (or when I hover over the mean line in the plot), I would see the mean error value.\r\n\r\n\r\n\r\nTo reproduce:\r\nhttps://www.kaggle.com/itay94/notebookf8c78e84d7\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The RegressionSystematicError check module.\"\"\"\nimport plotly.graph_objects as go\nfrom sklearn.base import BaseEstimator\nfrom sklearn.metrics import mean_squared_error\n\nfrom deepchecks import CheckResult, Dataset, SingleDatasetBaseCheck, ConditionResult\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\nfrom deepchecks.utils.strings import format_number\n\n\n__all__ = ['RegressionSystematicError']\n\n\nclass RegressionSystematicError(SingleDatasetBaseCheck):\n \"\"\"Check the regression systematic error.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Arguments:\n dataset (Dataset): A dataset object.\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n Returns:\n CheckResult:\n - value is a dict with rmse and mean prediction error.\n - display is box plot of the prediction error\u05d3.\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._regression_error_distribution(dataset, model)\n\n def _regression_error_distribution(self, dataset: Dataset, model: BaseEstimator):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.REGRESSION])\n\n y_test = dataset.label_col\n y_pred = model.predict(dataset.features_columns)\n\n rmse = mean_squared_error(dataset.label_col, y_pred, squared=False)\n diff = y_test - y_pred\n diff_mean = diff.mean()\n\n display = [\n 'Non-zero mean of the error distribution indicated the presents '\n 'of systematic error in model predictions',\n go.Figure()\n .add_trace(go.Box(\n x=diff.values,\n name='Model Prediction Error',\n boxpoints='suspectedoutliers',\n marker=dict(outliercolor='red'),\n hoverinfo='x',\n hoveron='points'))\n .update_layout(\n title_text='Box plot of the model prediction error',\n width=800, height=500)\n .add_vline(\n x=diff_mean + 0.01,\n line_dash='dash',\n annotation_text='Mean error')\n ]\n\n return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display)\n\n def add_condition_systematic_error_ratio_to_rmse_not_greater_than(self, max_ratio: float = 0.01):\n \"\"\"Add condition - require the absolute mean systematic error to be not greater than (max_ratio * RMSE).\n\n Args:\n max_ratio (float): Maximum ratio\n \"\"\"\n def max_bias_condition(result: float) -> ConditionResult:\n rmse = result['rmse']\n mean_error = result['mean_error']\n if abs(mean_error) > max_ratio * rmse:\n return ConditionResult(False,\n f'mean error: {format_number(mean_error, 5)}, RMSE: {format_number(rmse)}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Bias ratio is not greater than {format_number(max_ratio)}',\n max_bias_condition)\n", "path": "deepchecks/checks/performance/regression_systematic_error.py"}]}
| 1,623 | 358 |
gh_patches_debug_39617
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-150
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Expose the full response from the token server
`refresh` on oauth2 Credentials should store the full response from the token server. There is potentially useful data here, like the `id_token`.
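Conceptually that just means keeping the token endpoint's JSON (or at least fields like `id_token`) on the credentials object after a refresh; the following is a hedged, self-contained sketch of the idea rather than the library's actual API:
```python
class RefreshableCredentialsSketch:
    """Sketch: remember the full token-endpoint response when refreshing."""

    def __init__(self, token=None):
        self.token = token
        self.id_token = None
        self.grant_response = None

    def refresh(self, fetch_token):
        # fetch_token() stands in for the real call to the token endpoint
        response = fetch_token()
        self.grant_response = response            # keep everything the server sent
        self.token = response['access_token']
        self.id_token = response.get('id_token')  # present for OpenID Connect flows


creds = RefreshableCredentialsSketch()
creds.refresh(lambda: {'access_token': 'ya29.fake', 'id_token': 'eyJ.fake'})
print(creds.id_token)
```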
</issue>
<code>
[start of google/oauth2/credentials.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OAuth 2.0 Credentials.
16
17 This module provides credentials based on OAuth 2.0 access and refresh tokens.
18 These credentials usually access resources on behalf of a user (resource
19 owner).
20
21 Specifically, this is intended to use access tokens acquired using the
22 `Authorization Code grant`_ and can refresh those tokens using a
23 optional `refresh token`_.
24
25 Obtaining the initial access and refresh token is outside of the scope of this
26 module. Consult `rfc6749 section 4.1`_ for complete details on the
27 Authorization Code grant flow.
28
29 .. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
30 .. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
31 .. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
32 """
33
34 from google.auth import _helpers
35 from google.auth import credentials
36 from google.oauth2 import _client
37
38
39 class Credentials(credentials.Scoped, credentials.Credentials):
40 """Credentials using OAuth 2.0 access and refresh tokens."""
41
42 def __init__(self, token, refresh_token=None, token_uri=None,
43 client_id=None, client_secret=None, scopes=None):
44 """
45 Args:
46 token (Optional(str)): The OAuth 2.0 access token. Can be None
47 if refresh information is provided.
48 refresh_token (str): The OAuth 2.0 refresh token. If specified,
49 credentials can be refreshed.
50 token_uri (str): The OAuth 2.0 authorization server's token
51 endpoint URI. Must be specified for refresh, can be left as
52 None if the token can not be refreshed.
53 client_id (str): The OAuth 2.0 client ID. Must be specified for
54 refresh, can be left as None if the token can not be refreshed.
55 client_secret(str): The OAuth 2.0 client secret. Must be specified
56 for refresh, can be left as None if the token can not be
57 refreshed.
58 scopes (Sequence[str]): The scopes that were originally used
59 to obtain authorization. This is a purely informative parameter
60 that can be used by :meth:`has_scopes`. OAuth 2.0 credentials
61 can not request additional scopes after authorization.
62 """
63 super(Credentials, self).__init__()
64 self.token = token
65 self._refresh_token = refresh_token
66 self._scopes = scopes
67 self._token_uri = token_uri
68 self._client_id = client_id
69 self._client_secret = client_secret
70
71 @property
72 def refresh_token(self):
73 """Optional[str]: The OAuth 2.0 refresh token."""
74 return self._refresh_token
75
76 @property
77 def token_uri(self):
78 """Optional[str]: The OAuth 2.0 authorization server's token endpoint
79 URI."""
80 return self._token_uri
81
82 @property
83 def client_id(self):
84 """Optional[str]: The OAuth 2.0 client ID."""
85 return self._client_id
86
87 @property
88 def client_secret(self):
89 """Optional[str]: The OAuth 2.0 client secret."""
90 return self._client_secret
91
92 @property
93 def requires_scopes(self):
94 """False: OAuth 2.0 credentials have their scopes set when
95 the initial token is requested and can not be changed."""
96 return False
97
98 def with_scopes(self, scopes):
99 """Unavailable, OAuth 2.0 credentials can not be re-scoped.
100
101 OAuth 2.0 credentials have their scopes set when the initial token is
102 requested and can not be changed.
103 """
104 raise NotImplementedError(
105 'OAuth 2.0 Credentials can not modify their scopes.')
106
107 @_helpers.copy_docstring(credentials.Credentials)
108 def refresh(self, request):
109 access_token, refresh_token, expiry, _ = _client.refresh_grant(
110 request, self._token_uri, self._refresh_token, self._client_id,
111 self._client_secret)
112
113 self.token = access_token
114 self.expiry = expiry
115 self._refresh_token = refresh_token
116
[end of google/oauth2/credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py
--- a/google/oauth2/credentials.py
+++ b/google/oauth2/credentials.py
@@ -39,14 +39,16 @@
class Credentials(credentials.Scoped, credentials.Credentials):
"""Credentials using OAuth 2.0 access and refresh tokens."""
- def __init__(self, token, refresh_token=None, token_uri=None,
- client_id=None, client_secret=None, scopes=None):
+ def __init__(self, token, refresh_token=None, id_token=None,
+ token_uri=None, client_id=None, client_secret=None,
+ scopes=None):
"""
Args:
token (Optional(str)): The OAuth 2.0 access token. Can be None
if refresh information is provided.
refresh_token (str): The OAuth 2.0 refresh token. If specified,
credentials can be refreshed.
+ id_token (str): The Open ID Connect ID Token.
token_uri (str): The OAuth 2.0 authorization server's token
endpoint URI. Must be specified for refresh, can be left as
None if the token can not be refreshed.
@@ -63,6 +65,7 @@
super(Credentials, self).__init__()
self.token = token
self._refresh_token = refresh_token
+ self._id_token = id_token
self._scopes = scopes
self._token_uri = token_uri
self._client_id = client_id
@@ -79,6 +82,17 @@
URI."""
return self._token_uri
+ @property
+ def id_token(self):
+ """Optional[str]: The Open ID Connect ID Token.
+
+ Depending on the authorization server and the scopes requested, this
+ may be populated when credentials are obtained and updated when
+ :meth:`refresh` is called. This token is a JWT. It can be verified
+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.
+ """
+ return self._id_token
+
@property
def client_id(self):
"""Optional[str]: The OAuth 2.0 client ID."""
@@ -106,10 +120,12 @@
@_helpers.copy_docstring(credentials.Credentials)
def refresh(self, request):
- access_token, refresh_token, expiry, _ = _client.refresh_grant(
- request, self._token_uri, self._refresh_token, self._client_id,
- self._client_secret)
+ access_token, refresh_token, expiry, grant_response = (
+ _client.refresh_grant(
+ request, self._token_uri, self._refresh_token, self._client_id,
+ self._client_secret))
self.token = access_token
self.expiry = expiry
self._refresh_token = refresh_token
+ self._id_token = grant_response.get('id_token')
|
{"golden_diff": "diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py\n--- a/google/oauth2/credentials.py\n+++ b/google/oauth2/credentials.py\n@@ -39,14 +39,16 @@\n class Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n \n- def __init__(self, token, refresh_token=None, token_uri=None,\n- client_id=None, client_secret=None, scopes=None):\n+ def __init__(self, token, refresh_token=None, id_token=None,\n+ token_uri=None, client_id=None, client_secret=None,\n+ scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n+ id_token (str): The Open ID Connect ID Token.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n@@ -63,6 +65,7 @@\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n+ self._id_token = id_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n@@ -79,6 +82,17 @@\n URI.\"\"\"\n return self._token_uri\n \n+ @property\n+ def id_token(self):\n+ \"\"\"Optional[str]: The Open ID Connect ID Token.\n+\n+ Depending on the authorization server and the scopes requested, this\n+ may be populated when credentials are obtained and updated when\n+ :meth:`refresh` is called. This token is a JWT. It can be verified\n+ and decoded using :func:`google.oauth2.id_token.verify_oauth2_token`.\n+ \"\"\"\n+ return self._id_token\n+\n @property\n def client_id(self):\n \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n@@ -106,10 +120,12 @@\n \n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n- access_token, refresh_token, expiry, _ = _client.refresh_grant(\n- request, self._token_uri, self._refresh_token, self._client_id,\n- self._client_secret)\n+ access_token, refresh_token, expiry, grant_response = (\n+ _client.refresh_grant(\n+ request, self._token_uri, self._refresh_token, self._client_id,\n+ self._client_secret))\n \n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n+ self._id_token = grant_response.get('id_token')\n", "issue": "Expose the full response from the token server\n`refresh` on oauth2 Credentials should store the full response from the token server. 
There is potentially useful data here, like the `id_token`.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OAuth 2.0 Credentials.\n\nThis module provides credentials based on OAuth 2.0 access and refresh tokens.\nThese credentials usually access resources on behalf of a user (resource\nowner).\n\nSpecifically, this is intended to use access tokens acquired using the\n`Authorization Code grant`_ and can refresh those tokens using a\noptional `refresh token`_.\n\nObtaining the initial access and refresh token is outside of the scope of this\nmodule. Consult `rfc6749 section 4.1`_ for complete details on the\nAuthorization Code grant flow.\n\n.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1\n.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6\n.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1\n\"\"\"\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n\n def __init__(self, token, refresh_token=None, token_uri=None,\n client_id=None, client_secret=None, scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n client_id (str): The OAuth 2.0 client ID. Must be specified for\n refresh, can be left as None if the token can not be refreshed.\n client_secret(str): The OAuth 2.0 client secret. Must be specified\n for refresh, can be left as None if the token can not be\n refreshed.\n scopes (Sequence[str]): The scopes that were originally used\n to obtain authorization. This is a purely informative parameter\n that can be used by :meth:`has_scopes`. 
OAuth 2.0 credentials\n can not request additional scopes after authorization.\n \"\"\"\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n self._client_secret = client_secret\n\n @property\n def refresh_token(self):\n \"\"\"Optional[str]: The OAuth 2.0 refresh token.\"\"\"\n return self._refresh_token\n\n @property\n def token_uri(self):\n \"\"\"Optional[str]: The OAuth 2.0 authorization server's token endpoint\n URI.\"\"\"\n return self._token_uri\n\n @property\n def client_id(self):\n \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n return self._client_id\n\n @property\n def client_secret(self):\n \"\"\"Optional[str]: The OAuth 2.0 client secret.\"\"\"\n return self._client_secret\n\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n the initial token is requested and can not be changed.\"\"\"\n return False\n\n def with_scopes(self, scopes):\n \"\"\"Unavailable, OAuth 2.0 credentials can not be re-scoped.\n\n OAuth 2.0 credentials have their scopes set when the initial token is\n requested and can not be changed.\n \"\"\"\n raise NotImplementedError(\n 'OAuth 2.0 Credentials can not modify their scopes.')\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n access_token, refresh_token, expiry, _ = _client.refresh_grant(\n request, self._token_uri, self._refresh_token, self._client_id,\n self._client_secret)\n\n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n", "path": "google/oauth2/credentials.py"}]}
| 1,847 | 640 |
gh_patches_debug_20352
|
rasdani/github-patches
|
git_diff
|
google__flax-2591
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pooling operations should support inputs with multiple batch dimensions.
Provide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.
### System information
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 22.04 (anaconda)
- Flax, jax, jaxlib versions (obtain with `pip show flax jax jaxlib`): flax=0.6.1, jax=0.3.23, jaxlib=0.3.22+cuda11.cudnn82
- Python version: 3.10.6
- GPU/TPU model and memory: RTX3060, 12GB
- CUDA version (if applicable): 11.3
### Problem you have encountered:
In Flax, pooling operations (i.e., avg_pool and max_pool) only support arrays with shape (batch, window dims..., features) or (window dims..., features) as their inputs. If arrays with multiple batch dimensions are given, pooling operations raise AssertionError.
However, many layers such as Dense, Conv, BatchNorm, etc. support inputs with multiple batch dimensions. For example, the docstring of Conv.__call__ explains that "If the input has more than 1 batch dimension, all batch dimensions are flattened into a single dimension for the convolution and restored before returning". I think supporting inputs with multiple batch dimensions is sometimes useful, and pooling operations should support such inputs like other layers.
### What you expected to happen:
avg_pool and max_pool should support arrays with shape (*batch_dims, window dims..., features), where len(batch_dims) >= 2, as their inputs.
### Logs, error messages, etc:
When I run the code in "steps to reproduce", the following error is raised.
Traceback (most recent call last):
File "/home/hayato/Code/research/pool.py", line 15, in <module>
y = linen.max_pool(x, (2, 2), (2, 2))
File "/home/hayato/miniconda3/envs/jax/lib/python3.10/site-packages/flax/linen/pooling.py", line 113, in max_pool
y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
File "/home/hayato/miniconda3/envs/jax/lib/python3.10/site-packages/flax/linen/pooling.py", line 57, in pool
assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
AssertionError: len((1, 8, 32, 32, 3)) != len((1, 2, 2, 1))
### Steps to reproduce:
Whenever possible, please provide a *minimal example*. Please consider submitting it as a Colab link.
```python
import jax.random as jr
import jax.numpy as jnp
from flax import linen
# Image-like array with multiple batch dimensions. batch_dims=(1, 8)
x = jnp.zeros((1, 8, 32, 32, 3), dtype=jnp.float32)
# Conv does not raise any errors.
y, _ = linen.Conv(16, (3, 3)).init_with_output(jr.PRNGKey(0), x)
print(y.shape) # (1, 8, 32, 32, 16)
# max_pool raises AssertionError.
y = linen.max_pool(x, (2, 2), (2, 2))
print(y.shape) # Expected output: (1, 8, 16, 16, 3)
```
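
As an illustration of the reshape-based handling described above for `Conv` (flatten the extra batch dimensions, apply the op, then restore them), a possible user-side workaround might look like the sketch below. `max_pool_multi_batch` is a hypothetical helper written for this report, not part of Flax.

```python
import jax.numpy as jnp
from flax import linen


def max_pool_multi_batch(x, window_shape, strides):
    """Sketch of a workaround: merge extra batch dims, pool, then restore them."""
    num_spatial = len(window_shape)
    batch_dims = x.shape[:-(num_spatial + 1)]             # e.g. (1, 8)
    flat = x.reshape((-1,) + x.shape[len(batch_dims):])   # -> (8, 32, 32, 3)
    pooled = linen.max_pool(flat, window_shape, strides)  # single batch dim works today
    return pooled.reshape(batch_dims + pooled.shape[1:])  # restore (1, 8, ...)


y = max_pool_multi_batch(jnp.zeros((1, 8, 32, 32, 3), jnp.float32), (2, 2), (2, 2))
print(y.shape)  # (1, 8, 16, 16, 3)
```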
</issue>
<code>
[start of flax/linen/pooling.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as np
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply that
29 pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides (default: `(1, ..., 1)`).
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 assert len(window_shape) == len(strides), (
46 f"len({window_shape}) must equal len({strides})")
47 strides = (1,) + strides + (1,)
48 dims = (1,) + window_shape + (1,)
49
50 is_single_input = False
51 if inputs.ndim == len(dims) - 1:
52 # add singleton batch dimension because lax.reduce_window always
53 # needs a batch dimension.
54 inputs = inputs[None]
55 is_single_input = True
56
57 assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
58 if not isinstance(padding, str):
59 padding = tuple(map(tuple, padding))
60 assert len(padding) == len(window_shape), (
61 f"padding {padding} must specify pads for same number of dims as "
62 f"window_shape {window_shape}")
63 assert all([len(x) == 2 for x in padding]), (
64 f"each entry in padding {padding} must be length 2")
65 padding = ((0, 0),) + padding + ((0, 0),)
66 y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
67 if is_single_input:
68 y = jnp.squeeze(y, axis=0)
69 return y
70
71
72 def avg_pool(inputs, window_shape, strides=None, padding="VALID", count_include_pad=True):
73 """Pools the input by taking the average over a window.
74
75 Args:
76 inputs: input data with dimensions (batch, window dims..., features).
77 window_shape: a shape tuple defining the window to reduce over.
78 strides: a sequence of `n` integers, representing the inter-window
79 strides (default: `(1, ..., 1)`).
80 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
81 of `n` `(low, high)` integer pairs that give the padding to apply before
82 and after each spatial dimension (default: `'VALID'`).
83 count_include_pad: a boolean whether to include padded tokens
84 in the average calculation (default: `True`).
85 Returns:
86 The average for each window slice.
87 """
88 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
89 if count_include_pad:
90 y = y / np.prod(window_shape)
91 else:
92 div_shape = inputs.shape[:-1] + (1,)
93 if len(div_shape) - 2 == len(window_shape):
94 div_shape = (1,) + div_shape[1:]
95 y = y / pool(jnp.ones(div_shape), 0., lax.add, window_shape, strides, padding)
96 return y
97
98
99 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
100 """Pools the input by taking the maximum of a window slice.
101
102 Args:
103 inputs: input data with dimensions (batch, window dims..., features).
104 window_shape: a shape tuple defining the window to reduce over.
105 strides: a sequence of `n` integers, representing the inter-window
106 strides (default: `(1, ..., 1)`).
107 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
108 of `n` `(low, high)` integer pairs that give the padding to apply before
109 and after each spatial dimension (default: `'VALID'`).
110 Returns:
111 The maximum for each window slice.
112 """
113 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
114 return y
115
116
117 def min_pool(inputs, window_shape, strides=None, padding="VALID"):
118 """Pools the input by taking the minimum of a window slice.
119
120 Args:
121 inputs: Input data with dimensions (batch, window dims..., features).
122 window_shape: A shape tuple defining the window to reduce over.
123 strides: A sequence of `n` integers, representing the inter-window strides
124 (default: `(1, ..., 1)`).
125 padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
126 `n` `(low, high)` integer pairs that give the padding to apply before and
127 after each spatial dimension (default: `'VALID'`).
128
129 Returns:
130 The minimum for each window slice.
131 """
132 return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)
133
[end of flax/linen/pooling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py
--- a/flax/linen/pooling.py
+++ b/flax/linen/pooling.py
@@ -41,17 +41,20 @@
Returns:
The output of the reduction for each window slice.
"""
+ num_batch_dims = inputs.ndim - (len(window_shape) + 1)
strides = strides or (1,) * len(window_shape)
assert len(window_shape) == len(strides), (
f"len({window_shape}) must equal len({strides})")
- strides = (1,) + strides + (1,)
- dims = (1,) + window_shape + (1,)
+ strides = (1,) * num_batch_dims + strides + (1,)
+ dims = (1,) * num_batch_dims + window_shape + (1,)
is_single_input = False
- if inputs.ndim == len(dims) - 1:
+ if num_batch_dims == 0:
# add singleton batch dimension because lax.reduce_window always
# needs a batch dimension.
inputs = inputs[None]
+ strides = (1,) + strides
+ dims = (1,) + dims
is_single_input = True
assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
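
Assuming the diff above is applied, inputs with several leading batch dimensions should pool directly, without any reshape workaround; a quick sanity check could be:

```python
import jax.numpy as jnp
from flax import linen

# With the patch, the extra leading batch dimensions are padded into the
# window/stride tuples inside `pool`, so multi-batch inputs are accepted.
x = jnp.zeros((1, 8, 32, 32, 3), dtype=jnp.float32)
y = linen.max_pool(x, (2, 2), strides=(2, 2))
print(y.shape)  # expected: (1, 8, 16, 16, 3)
```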
|
{"golden_diff": "diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py\n--- a/flax/linen/pooling.py\n+++ b/flax/linen/pooling.py\n@@ -41,17 +41,20 @@\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n+ num_batch_dims = inputs.ndim - (len(window_shape) + 1)\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n- strides = (1,) + strides + (1,)\n- dims = (1,) + window_shape + (1,)\n+ strides = (1,) * num_batch_dims + strides + (1,)\n+ dims = (1,) * num_batch_dims + window_shape + (1,)\n \n is_single_input = False\n- if inputs.ndim == len(dims) - 1:\n+ if num_batch_dims == 0:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n+ strides = (1,) + strides\n+ dims = (1,) + dims\n is_single_input = True\n \n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n", "issue": "Pooling operations should support inputs with multiple batch dimensions.\nProvide as much information as possible. At least, this should include a description of your issue and steps to reproduce the problem. If possible also provide a summary of what steps or workarounds you have already tried.\r\n \r\n### System information\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 22.04 (anaconda)\r\n- Flax, jax, jaxlib versions (obtain with `pip show flax jax jaxlib`: flax=0.6.1, jax=0.3.23, jaxlib=0.3.22+cuda11.cudnn82\r\n- Python version: 3.10.6\r\n- GPU/TPU model and memory: RTX3060, 12GB\r\n- CUDA version (if applicable): 11.3\r\n\r\n\r\n### Problem you have encountered:\r\nIn Flax, pooling operations (i.e., avg_pool and max_pool) only support arrays with shape (batch, window dims..., features) or (window dims..., features) as their inputs. If arrays with multiple batch dimensions are given, pooling operations raise AssertionError. \r\n\r\nHowever, many layers such as Dense, Conv, BatchNorm, etc. support inputs with multiple batch dimensions. For example, the docstring of Conv.__call__ explains that \"If the input has more than 1 batch dimension, all batch dimensions are flattened into a single dimension for the convolution and restored before returning\". I think supporting inputs with multiple batch dimensions is sometimes useful, and pooling operations should support such inputs like other layers. \r\n\r\n### What you expected to happen:\r\navg_pool and max_pool should support arrays with shape (*batch_dims, window dims..., features) where len(batch_dims)>2 as their inputs. \r\n\r\n### Logs, error messages, etc:\r\nWhen I run the code in \"steps to reproduce\", the following error is raised.\r\n\r\nTraceback (most recent call last):\r\n File \"/home/hayato/Code/research/pool.py\", line 15, in <module>\r\n y = linen.max_pool(x, (2, 2), (2, 2))\r\n File \"/home/hayato/miniconda3/envs/jax/lib/python3.10/site-packages/flax/linen/pooling.py\", line 113, in max_pool\r\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\r\n File \"/home/hayato/miniconda3/envs/jax/lib/python3.10/site-packages/flax/linen/pooling.py\", line 57, in pool\r\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\r\nAssertionError: len((1, 8, 32, 32, 3)) != len((1, 2, 2, 1))\r\n\r\n\r\n### Steps to reproduce:\r\nWhenever possible, please provide a *minimal example*. 
Please consider submitting it as a Colab link.\r\n\r\n```python\r\nimport jax.random as jr\r\nimport jax.numpy as jnp\r\nfrom flax import linen\r\n\r\n# Image-like array with multiple batch dimensions. batch_dims=(1, 8)\r\nx = jnp.zeros((1, 8, 32, 32, 3), dtype=jnp.float32)\r\n\r\n# Conv does not raise any errors.\r\ny, _ = linen.Conv(16, (3, 3)).init_with_output(jr.PRNGKey(0), x) \r\nprint(y.shape) # (1, 8, 32, 32, 16)\r\n\r\n# max_pool raises AssertionError.\r\ny = linen.max_pool(x, (2, 2), (2, 2)) \r\nprint(y.shape) # Expected output: (1, 8, 16, 16, 3)\r\n```\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply that\n pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\", count_include_pad=True):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape 
tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n count_include_pad: a boolean whether to include padded tokens\n in the average calculation (default: `True`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n if count_include_pad:\n y = y / np.prod(window_shape)\n else:\n div_shape = inputs.shape[:-1] + (1,)\n if len(div_shape) - 2 == len(window_shape):\n div_shape = (1,) + div_shape[1:]\n y = y / pool(jnp.ones(div_shape), 0., lax.add, window_shape, strides, padding)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}]}
| 3,010 | 305 |
gh_patches_debug_18776
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-1239
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`context` sometimes gets printed with the `set` command
`set $rax=0` sometimes causes `context` to be immediately called afterwards. I don't think this is always reproducible, but will keep investigating it.
</issue>
<code>
[start of pwndbg/gdblib/prompt.py]
1 import re
2
3 import gdb
4
5 import pwndbg.decorators
6 import pwndbg.gdblib.events
7 import pwndbg.gdbutils
8 import pwndbg.lib.memoize
9 from pwndbg.color import disable_colors
10 from pwndbg.color import message
11 from pwndbg.lib.tips import get_tip_of_the_day
12
13 funcs_list_str = ", ".join(
14 message.notice("$" + f.name) for f in pwndbg.gdbutils.functions.functions
15 )
16
17 num_pwndbg_cmds = sum(1 for _ in filter(lambda c: not c.shell, pwndbg.commands.commands))
18 num_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands))
19 hint_lines = (
20 "loaded %i pwndbg commands and %i shell commands. Type %s for a list."
21 % (num_pwndbg_cmds, num_shell_cmds, message.notice("pwndbg [--shell | --all] [filter]")),
22 "created %s gdb functions (can be used with print/break)" % funcs_list_str,
23 )
24
25 for line in hint_lines:
26 print(message.prompt("pwndbg: ") + message.system(line))
27
28 # noinspection PyPackageRequirements
29 show_tip = pwndbg.config.Parameter(
30 "show-tips", True, "whether to display the tip of the day on startup"
31 )
32
33 cur = None
34
35
36 def initial_hook(*a):
37 if show_tip and not pwndbg.decorators.first_prompt:
38 colored_tip = re.sub(
39 "`(.*?)`", lambda s: message.warn(s.group()[1:-1]), get_tip_of_the_day()
40 )
41 print(
42 message.prompt("------- tip of the day")
43 + message.system(" (disable with %s)" % message.notice("set show-tips off"))
44 + message.prompt(" -------")
45 )
46 print((colored_tip))
47 pwndbg.decorators.first_prompt = True
48
49 prompt_hook(*a)
50 gdb.prompt_hook = prompt_hook
51
52
53 def prompt_hook(*a):
54 global cur
55
56 new = (gdb.selected_inferior(), gdb.selected_thread())
57
58 if cur != new:
59 pwndbg.gdblib.events.after_reload(start=cur is None)
60 cur = new
61
62 if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:
63 prompt_hook_on_stop(*a)
64
65
66 @pwndbg.lib.memoize.reset_on_stop
67 def prompt_hook_on_stop(*a):
68 pwndbg.commands.context.context()
69
70
71 @pwndbg.config.Trigger([message.config_prompt_color, disable_colors])
72 def set_prompt():
73 prompt = "pwndbg> "
74
75 if not disable_colors:
76 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH
77 prompt = message.prompt(prompt)
78 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
79
80 gdb.execute("set prompt %s" % prompt)
81
82
83 if pwndbg.gdblib.events.before_prompt_event.is_real_event:
84 gdb.prompt_hook = initial_hook
85
86 else:
87 # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook
88 def extended_prompt_hook(*a):
89 pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()
90 return prompt_hook(*a)
91
92 gdb.prompt_hook = extended_prompt_hook
93
[end of pwndbg/gdblib/prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py
--- a/pwndbg/gdblib/prompt.py
+++ b/pwndbg/gdblib/prompt.py
@@ -50,8 +50,11 @@
gdb.prompt_hook = prompt_hook
+context_shown = False
+
+
def prompt_hook(*a):
- global cur
+ global cur, context_shown
new = (gdb.selected_inferior(), gdb.selected_thread())
@@ -59,13 +62,15 @@
pwndbg.gdblib.events.after_reload(start=cur is None)
cur = new
- if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:
- prompt_hook_on_stop(*a)
+ if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped and not context_shown:
+ pwndbg.commands.context.context()
+ context_shown = True
[email protected]_on_stop
-def prompt_hook_on_stop(*a):
- pwndbg.commands.context.context()
[email protected]
+def reset_context_shown(*a):
+ global context_shown
+ context_shown = False
@pwndbg.config.Trigger([message.config_prompt_color, disable_colors])
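
The core of the fix is replacing the `reset_on_stop` memoization with an explicit "context already shown" flag that a stop handler re-arms. A stripped-down, GDB-free sketch of that pattern (the class and names are illustrative, not pwndbg API):

```python
class OncePerStop:
    """Run a callback at most once between consecutive 'stop' events."""

    def __init__(self, callback):
        self._callback = callback
        self._shown = False

    def on_prompt(self):
        # Called every time the prompt is about to be displayed.
        if not self._shown:
            self._callback()
            self._shown = True

    def on_stop(self):
        # Called when the inferior stops; re-arms the callback.
        self._shown = False


shown = []
hook = OncePerStop(lambda: shown.append("context"))
hook.on_prompt(); hook.on_prompt()  # second prompt (e.g. after `set`) shows nothing
hook.on_stop(); hook.on_prompt()    # the next stop shows the context again
print(shown)                        # ['context', 'context']
```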
|
{"golden_diff": "diff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py\n--- a/pwndbg/gdblib/prompt.py\n+++ b/pwndbg/gdblib/prompt.py\n@@ -50,8 +50,11 @@\n gdb.prompt_hook = prompt_hook\n \n \n+context_shown = False\n+\n+\n def prompt_hook(*a):\n- global cur\n+ global cur, context_shown\n \n new = (gdb.selected_inferior(), gdb.selected_thread())\n \n@@ -59,13 +62,15 @@\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n \n- if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n- prompt_hook_on_stop(*a)\n+ if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped and not context_shown:\n+ pwndbg.commands.context.context()\n+ context_shown = True\n \n \[email protected]_on_stop\n-def prompt_hook_on_stop(*a):\n- pwndbg.commands.context.context()\[email protected]\n+def reset_context_shown(*a):\n+ global context_shown\n+ context_shown = False\n \n \n @pwndbg.config.Trigger([message.config_prompt_color, disable_colors])\n", "issue": "`context` sometimes gets printed with the `set` command\n`set $rax=0` sometimes causes `context` to be immediately called afterwards. I think don't think this is always reproducible, but will keep investigating it.\n", "before_files": [{"content": "import re\n\nimport gdb\n\nimport pwndbg.decorators\nimport pwndbg.gdblib.events\nimport pwndbg.gdbutils\nimport pwndbg.lib.memoize\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.lib.tips import get_tip_of_the_day\n\nfuncs_list_str = \", \".join(\n message.notice(\"$\" + f.name) for f in pwndbg.gdbutils.functions.functions\n)\n\nnum_pwndbg_cmds = sum(1 for _ in filter(lambda c: not c.shell, pwndbg.commands.commands))\nnum_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands))\nhint_lines = (\n \"loaded %i pwndbg commands and %i shell commands. 
Type %s for a list.\"\n % (num_pwndbg_cmds, num_shell_cmds, message.notice(\"pwndbg [--shell | --all] [filter]\")),\n \"created %s gdb functions (can be used with print/break)\" % funcs_list_str,\n)\n\nfor line in hint_lines:\n print(message.prompt(\"pwndbg: \") + message.system(line))\n\n# noinspection PyPackageRequirements\nshow_tip = pwndbg.config.Parameter(\n \"show-tips\", True, \"whether to display the tip of the day on startup\"\n)\n\ncur = None\n\n\ndef initial_hook(*a):\n if show_tip and not pwndbg.decorators.first_prompt:\n colored_tip = re.sub(\n \"`(.*?)`\", lambda s: message.warn(s.group()[1:-1]), get_tip_of_the_day()\n )\n print(\n message.prompt(\"------- tip of the day\")\n + message.system(\" (disable with %s)\" % message.notice(\"set show-tips off\"))\n + message.prompt(\" -------\")\n )\n print((colored_tip))\n pwndbg.decorators.first_prompt = True\n\n prompt_hook(*a)\n gdb.prompt_hook = prompt_hook\n\n\ndef prompt_hook(*a):\n global cur\n\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\[email protected]([message.config_prompt_color, disable_colors])\ndef set_prompt():\n prompt = \"pwndbg> \"\n\n if not disable_colors:\n prompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\n prompt = message.prompt(prompt)\n prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\n gdb.execute(\"set prompt %s\" % prompt)\n\n\nif pwndbg.gdblib.events.before_prompt_event.is_real_event:\n gdb.prompt_hook = initial_hook\n\nelse:\n # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\n def extended_prompt_hook(*a):\n pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()\n return prompt_hook(*a)\n\n gdb.prompt_hook = extended_prompt_hook\n", "path": "pwndbg/gdblib/prompt.py"}]}
| 1,503 | 301 |
gh_patches_debug_18658
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-1344
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
set_schema_ft() SQL function delete some triggers
set_schema_ft() contains a "DROP FUNCTION ... CASCADE" that deletes some other functions or triggers, e.g. e_t_evenement_geom_iu_tgr.
All 0.28.x releases are affected.
I think we should create functions directly in the right schema and drop functions from public schema rather than moving them.
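
A rough sketch of that idea (not Geotrek code — the helper, its arguments, and the example signature below are assumptions) would be to create objects directly in the target schema and drop only the stale copy left in `public`, without `CASCADE`:

```python
from django.db import connection


def create_function_in_schema(schema, create_sql, old_public_signature=None):
    """Create the function in the target schema, then drop only the stale
    public copy (no CASCADE), so dependent triggers such as
    e_t_evenement_geom_iu_tgr are left untouched."""
    cursor = connection.cursor()
    cursor.execute("SET search_path TO %s, public;" % schema)
    cursor.execute(create_sql)  # a CREATE OR REPLACE FUNCTION ... statement
    if old_public_signature:
        # e.g. "set_schema_ft(text, text)" -- the exact signature is an assumption
        cursor.execute("DROP FUNCTION IF EXISTS public.%s;" % old_public_signature)
```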
</issue>
<code>
[start of geotrek/common/utils/postgresql.py]
1 import re
2 import os
3 import logging
4 import traceback
5 from functools import wraps
6
7 from django.db import connection, models
8 from django.conf import settings
9 from django.db.models import get_app, get_models
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def debug_pg_notices(f):
16
17 @wraps(f)
18 def wrapped(*args, **kwargs):
19 before = len(connection.connection.notices) if connection.connection else 0
20 try:
21 r = f(*args, **kwargs)
22 finally:
23 # Show triggers output
24 allnotices = []
25 current = ''
26 if connection.connection:
27 notices = []
28 for notice in connection.connection.notices[before:]:
29 try:
30 notice, context = notice.split('CONTEXT:', 1)
31 context = re.sub("\s+", " ", context)
32 except ValueError:
33 context = ''
34 notices.append((context, notice))
35 if context != current:
36 allnotices.append(notices)
37 notices = []
38 current = context
39 allnotices.append(notices)
40 current = ''
41 for notices in allnotices:
42 for context, notice in notices:
43 if context != current:
44 if context != '':
45 logger.debug('Context %s...:' % context.strip()[:80])
46 current = context
47 notice = notice.replace('NOTICE: ', '')
48 prefix = '' if context == '' else ' '
49 logger.debug('%s%s' % (prefix, notice.strip()))
50 return r
51
52 return wrapped
53
54
55 def load_sql_files(app_label):
56 """
57 Look for SQL files in Django app, and load them into database.
58 We remove RAISE NOTICE instructions from SQL outside unit testing
59 since they lead to interpolation errors of '%' character in python.
60 """
61 app_dir = os.path.dirname(models.get_app(app_label).__file__)
62 sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
63 if not os.path.exists(sql_dir):
64 logger.debug("No SQL folder for %s" % app_label)
65 return
66
67 r = re.compile(r'^.*\.sql$')
68 sql_files = [os.path.join(sql_dir, f)
69 for f in os.listdir(sql_dir)
70 if r.match(f) is not None]
71 sql_files.sort()
72
73 if len(sql_files) == 0:
74 logger.warning("Empty folder %s" % sql_dir)
75
76 cursor = connection.cursor()
77 for sql_file in sql_files:
78 try:
79 logger.info("Loading initial SQL data from '%s'" % sql_file)
80 f = open(sql_file)
81 sql = f.read()
82 f.close()
83 if not settings.TEST:
84 # Remove RAISE NOTICE (/!\ only one-liners)
85 sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
86 # TODO: this is the ugliest driver hack ever
87 sql = sql.replace('%', '%%')
88
89 # Replace curly braces with settings values
90 pattern = re.compile(r'{{\s*(.*)\s*}}')
91 for m in pattern.finditer(sql):
92 value = getattr(settings, m.group(1))
93 sql = sql.replace(m.group(0), unicode(value))
94 cursor.execute(sql)
95 except Exception as e:
96 logger.critical("Failed to install custom SQL file '%s': %s\n" %
97 (sql_file, e))
98 traceback.print_exc()
99 raise
100
101
102 def move_models_to_schemas(app_label):
103 """
104 Move models tables to PostgreSQL schemas.
105
106 Views, functions and triggers will be moved in Geotrek app SQL files.
107 """
108 app = get_app(app_label)
109 default_schema = settings.DATABASE_SCHEMAS.get('default')
110 app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)
111
112 table_schemas = {}
113 for model in get_models(app):
114 model_name = model._meta.module_name
115 table_name = model._meta.db_table
116 model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
117 table_schemas.setdefault(model_schema, []).append(table_name)
118
119 for m2m_field in model._meta.many_to_many:
120 table_name = m2m_field.db_table
121 if table_name:
122 table_schemas[model_schema].append(table_name)
123
124 cursor = connection.cursor()
125
126 for schema_name in table_schemas.keys():
127 try:
128 sql = "CREATE SCHEMA %s;" % model_schema
129 cursor.execute(sql)
130 logger.info("Created schema %s" % model_schema)
131 except Exception:
132 logger.debug("Schema %s already exists." % model_schema)
133
134 for schema_name, tables in table_schemas.items():
135 for table_name in tables:
136 try:
137 sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
138 cursor.execute(sql)
139 logger.info("Moved %s to schema %s" % (table_name, schema_name))
140 except Exception:
141 logger.debug("Table %s already in schema %s" % (table_name, schema_name))
142
143 # For Django, search_path is set in connection options.
144 # But when accessing the database using QGis or ETL, search_path must be
145 # set database level (for all users, and for this database only).
146 if app_label == 'common':
147 dbname = settings.DATABASES['default']['NAME']
148 dbuser = settings.DATABASES['default']['USER']
149 search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))
150 sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
151 cursor.execute(sql)
152
[end of geotrek/common/utils/postgresql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py
--- a/geotrek/common/utils/postgresql.py
+++ b/geotrek/common/utils/postgresql.py
@@ -133,12 +133,12 @@
for schema_name, tables in table_schemas.items():
for table_name in tables:
- try:
+ sql = "SELECT 1 FROM information_schema.tables WHERE table_name=%s AND table_schema!=%s"
+ cursor.execute(sql, [table_name, schema_name])
+ if cursor.fetchone():
sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
cursor.execute(sql)
logger.info("Moved %s to schema %s" % (table_name, schema_name))
- except Exception:
- logger.debug("Table %s already in schema %s" % (table_name, schema_name))
# For Django, search_path is set in connection options.
# But when accessing the database using QGis or ETL, search_path must be
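
For reference, the existence check this patch introduces can be read in isolation; a minimal helper built on the same `information_schema` query (illustrative only, assuming a configured Django connection) would be:

```python
from django.db import connection


def table_needs_move(table_name, schema_name):
    """Return True if the table still lives outside the target schema."""
    cursor = connection.cursor()
    cursor.execute(
        "SELECT 1 FROM information_schema.tables "
        "WHERE table_name=%s AND table_schema!=%s",
        [table_name, schema_name],
    )
    return cursor.fetchone() is not None
```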
|
{"golden_diff": "diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py\n--- a/geotrek/common/utils/postgresql.py\n+++ b/geotrek/common/utils/postgresql.py\n@@ -133,12 +133,12 @@\n \n for schema_name, tables in table_schemas.items():\n for table_name in tables:\n- try:\n+ sql = \"SELECT 1 FROM information_schema.tables WHERE table_name=%s AND table_schema!=%s\"\n+ cursor.execute(sql, [table_name, schema_name])\n+ if cursor.fetchone():\n sql = \"ALTER TABLE %s SET SCHEMA %s;\" % (table_name, schema_name)\n cursor.execute(sql)\n logger.info(\"Moved %s to schema %s\" % (table_name, schema_name))\n- except Exception:\n- logger.debug(\"Table %s already in schema %s\" % (table_name, schema_name))\n \n # For Django, search_path is set in connection options.\n # But when accessing the database using QGis or ETL, search_path must be\n", "issue": "set_schema_ft() SQL function delete some triggers\nset_schema_ft() contains as \"DROP FUNCTION ... CASCADE\" that delete some other functions or triggers, eg. e_t_evenement_geom_iu_tgr.\n\nAll 0.28.x releases are affected.\n\nI think we should create functions directly in the right schema and drop functions from public schema rather than moving them.\n\n", "before_files": [{"content": "import re\nimport os\nimport logging\nimport traceback\nfrom functools import wraps\n\nfrom django.db import connection, models\nfrom django.conf import settings\nfrom django.db.models import get_app, get_models\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef debug_pg_notices(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n before = len(connection.connection.notices) if connection.connection else 0\n try:\n r = f(*args, **kwargs)\n finally:\n # Show triggers output\n allnotices = []\n current = ''\n if connection.connection:\n notices = []\n for notice in connection.connection.notices[before:]:\n try:\n notice, context = notice.split('CONTEXT:', 1)\n context = re.sub(\"\\s+\", \" \", context)\n except ValueError:\n context = ''\n notices.append((context, notice))\n if context != current:\n allnotices.append(notices)\n notices = []\n current = context\n allnotices.append(notices)\n current = ''\n for notices in allnotices:\n for context, notice in notices:\n if context != current:\n if context != '':\n logger.debug('Context %s...:' % context.strip()[:80])\n current = context\n notice = notice.replace('NOTICE: ', '')\n prefix = '' if context == '' else ' '\n logger.debug('%s%s' % (prefix, notice.strip()))\n return r\n\n return wrapped\n\n\ndef load_sql_files(app_label):\n \"\"\"\n Look for SQL files in Django app, and load them into database.\n We remove RAISE NOTICE instructions from SQL outside unit testing\n since they lead to interpolation errors of '%' character in python.\n \"\"\"\n app_dir = os.path.dirname(models.get_app(app_label).__file__)\n sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))\n if not os.path.exists(sql_dir):\n logger.debug(\"No SQL folder for %s\" % app_label)\n return\n\n r = re.compile(r'^.*\\.sql$')\n sql_files = [os.path.join(sql_dir, f)\n for f in os.listdir(sql_dir)\n if r.match(f) is not None]\n sql_files.sort()\n\n if len(sql_files) == 0:\n logger.warning(\"Empty folder %s\" % sql_dir)\n\n cursor = connection.cursor()\n for sql_file in sql_files:\n try:\n logger.info(\"Loading initial SQL data from '%s'\" % sql_file)\n f = open(sql_file)\n sql = f.read()\n f.close()\n if not settings.TEST:\n # Remove RAISE NOTICE (/!\\ only one-liners)\n sql = re.sub(r\"\\n.*RAISE NOTICE.*\\n\", \"\\n\", sql)\n # 
TODO: this is the ugliest driver hack ever\n sql = sql.replace('%', '%%')\n\n # Replace curly braces with settings values\n pattern = re.compile(r'{{\\s*(.*)\\s*}}')\n for m in pattern.finditer(sql):\n value = getattr(settings, m.group(1))\n sql = sql.replace(m.group(0), unicode(value))\n cursor.execute(sql)\n except Exception as e:\n logger.critical(\"Failed to install custom SQL file '%s': %s\\n\" %\n (sql_file, e))\n traceback.print_exc()\n raise\n\n\ndef move_models_to_schemas(app_label):\n \"\"\"\n Move models tables to PostgreSQL schemas.\n\n Views, functions and triggers will be moved in Geotrek app SQL files.\n \"\"\"\n app = get_app(app_label)\n default_schema = settings.DATABASE_SCHEMAS.get('default')\n app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)\n\n table_schemas = {}\n for model in get_models(app):\n model_name = model._meta.module_name\n table_name = model._meta.db_table\n model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)\n table_schemas.setdefault(model_schema, []).append(table_name)\n\n for m2m_field in model._meta.many_to_many:\n table_name = m2m_field.db_table\n if table_name:\n table_schemas[model_schema].append(table_name)\n\n cursor = connection.cursor()\n\n for schema_name in table_schemas.keys():\n try:\n sql = \"CREATE SCHEMA %s;\" % model_schema\n cursor.execute(sql)\n logger.info(\"Created schema %s\" % model_schema)\n except Exception:\n logger.debug(\"Schema %s already exists.\" % model_schema)\n\n for schema_name, tables in table_schemas.items():\n for table_name in tables:\n try:\n sql = \"ALTER TABLE %s SET SCHEMA %s;\" % (table_name, schema_name)\n cursor.execute(sql)\n logger.info(\"Moved %s to schema %s\" % (table_name, schema_name))\n except Exception:\n logger.debug(\"Table %s already in schema %s\" % (table_name, schema_name))\n\n # For Django, search_path is set in connection options.\n # But when accessing the database using QGis or ETL, search_path must be\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = settings.DATABASES['default']['NAME']\n dbuser = settings.DATABASES['default']['USER']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n sql = \"ALTER ROLE %s IN DATABASE %s SET search_path=%s;\" % (dbuser, dbname, search_path)\n cursor.execute(sql)\n", "path": "geotrek/common/utils/postgresql.py"}]}
| 2,180 | 235 |
gh_patches_debug_36872
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-3409
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing Access-Control-Allow-Origin in redirect headers
From https://sourceforge.net/p/pypi/support-requests/741/:
> Hi,
>
> Both pypi.org and pypi.python.org do not have proper Access-Control-Allow-Origin response headers in their redirect responses. Please see https://stackoverflow.com/questions/44637138/case-sensitive-url-triggering-cors-error.
>
> It would be very helpful to add these to the headers so that redirect requests are properly handled.
>
> Thanks!
---
**Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).
</issue>
<code>
[start of warehouse/legacy/api/json.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.view import view_config
15 from sqlalchemy.orm import Load
16 from sqlalchemy.orm.exc import NoResultFound
17
18 from warehouse.cache.http import cache_control
19 from warehouse.cache.origin import origin_cache
20 from warehouse.packaging.models import File, Release
21
22
23 @view_config(
24 route_name="legacy.api.json.project",
25 renderer="json",
26 decorator=[
27 cache_control(15 * 60), # 15 minutes
28 origin_cache(
29 1 * 24 * 60 * 60, # 1 day
30 stale_while_revalidate=5 * 60, # 5 minutes
31 stale_if_error=1 * 24 * 60 * 60, # 1 day
32 ),
33 ],
34 )
35 def json_project(project, request):
36 if project.name != request.matchdict.get("name", project.name):
37 return HTTPMovedPermanently(
38 request.current_route_path(name=project.name),
39 )
40
41 try:
42 release = (
43 request.db.query(Release)
44 .filter(Release.project == project)
45 .order_by(
46 Release.is_prerelease.nullslast(),
47 Release._pypi_ordering.desc())
48 .limit(1)
49 .one()
50 )
51 except NoResultFound:
52 return HTTPNotFound()
53
54 return json_release(release, request)
55
56
57 @view_config(
58 route_name="legacy.api.json.release",
59 renderer="json",
60 decorator=[
61 cache_control(15 * 60), # 15 minutes
62 origin_cache(
63 1 * 24 * 60 * 60, # 1 day
64 stale_while_revalidate=5 * 60, # 5 minutes
65 stale_if_error=1 * 24 * 60 * 60, # 1 day
66 ),
67 ],
68 )
69 def json_release(release, request):
70 project = release.project
71
72 if project.name != request.matchdict.get("name", project.name):
73 return HTTPMovedPermanently(
74 request.current_route_path(name=project.name),
75 )
76
77 # We want to allow CORS here to enable anyone to fetch data from this API
78 request.response.headers["Access-Control-Allow-Origin"] = "*"
79 request.response.headers["Access-Control-Allow-Headers"] = ", ".join([
80 "Content-Type",
81 "If-Match",
82 "If-Modified-Since",
83 "If-None-Match",
84 "If-Unmodified-Since",
85 ])
86 request.response.headers["Access-Control-Allow-Methods"] = "GET"
87 request.response.headers["Access-Control-Max-Age"] = "86400"
88 request.response.headers["Access-Control-Expose-Headers"] = ", ".join([
89 "X-PyPI-Last-Serial",
90 ])
91
92 # Get the latest serial number for this project.
93 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
94
95 # Get all of the releases and files for this project.
96 release_files = (
97 request.db.query(Release, File)
98 .options(Load(Release).load_only('version'))
99 .outerjoin(File)
100 .filter(Release.project == project)
101 .order_by(Release._pypi_ordering.desc(), File.filename)
102 .all()
103 )
104
105 # Map our releases + files into a dictionary that maps each release to a
106 # list of all its files.
107 releases = {}
108 for r, file_ in release_files:
109 files = releases.setdefault(r, [])
110 if file_ is not None:
111 files.append(file_)
112
113 # Serialize our database objects to match the way that PyPI legacy
114 # presented this data.
115 releases = {
116 r.version: [
117 {
118 "filename": f.filename,
119 "packagetype": f.packagetype,
120 "python_version": f.python_version,
121 "has_sig": f.has_signature,
122 "comment_text": f.comment_text,
123 "md5_digest": f.md5_digest,
124 "digests": {
125 "md5": f.md5_digest,
126 "sha256": f.sha256_digest,
127 },
128 "size": f.size,
129 # TODO: Remove this once we've had a long enough time with it
130 # here to consider it no longer in use.
131 "downloads": -1,
132 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
133 "url": request.route_url("packaging.file", path=f.path),
134 }
135 for f in fs
136 ]
137 for r, fs in releases.items()
138 }
139
140 return {
141 "info": {
142 "name": project.name,
143 "version": release.version,
144 "summary": release.summary,
145 "description": release.description,
146 "keywords": release.keywords,
147 "license": release.license,
148 "classifiers": list(release.classifiers),
149 "author": release.author,
150 "author_email": release.author_email,
151 "maintainer": release.maintainer,
152 "maintainer_email": release.maintainer_email,
153 "requires_python": release.requires_python,
154 "platform": release.platform,
155 "downloads": {
156 "last_day": -1,
157 "last_week": -1,
158 "last_month": -1,
159 },
160 "package_url": request.route_url(
161 "packaging.project",
162 name=project.name,
163 ),
164 "project_url": request.route_url(
165 "packaging.project",
166 name=project.name,
167 ),
168 "release_url": request.route_url(
169 "packaging.release",
170 name=project.name,
171 version=release.version,
172 ),
173 "requires_dist": (list(release.requires_dist)
174 if release.requires_dist else None),
175 "docs_url": project.documentation_url,
176 "bugtrack_url": project.bugtrack_url,
177 "home_page": release.home_page,
178 "download_url": release.download_url,
179 },
180 "urls": releases[release.version],
181 "releases": releases,
182 }
183
[end of warehouse/legacy/api/json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -20,6 +20,26 @@
from warehouse.packaging.models import File, Release
+# Generate appropriate CORS headers for the JSON endpoint.
+# We want to allow Cross-Origin requests here so that users can interact
+# with these endpoints via XHR/Fetch APIs in the browser.
+_CORS_HEADERS = {
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Allow-Headers": ", ".join([
+ "Content-Type",
+ "If-Match",
+ "If-Modified-Since",
+ "If-None-Match",
+ "If-Unmodified-Since",
+ ]),
+ "Access-Control-Allow-Methods": "GET",
+ "Access-Control-Max-Age": "86400", # 1 day.
+ "Access-Control-Expose-Headers": ", ".join([
+ "X-PyPI-Last-Serial",
+ ]),
+}
+
+
@view_config(
route_name="legacy.api.json.project",
renderer="json",
@@ -36,6 +56,7 @@
if project.name != request.matchdict.get("name", project.name):
return HTTPMovedPermanently(
request.current_route_path(name=project.name),
+ headers=_CORS_HEADERS
)
try:
@@ -49,7 +70,7 @@
.one()
)
except NoResultFound:
- return HTTPNotFound()
+ return HTTPNotFound(headers=_CORS_HEADERS)
return json_release(release, request)
@@ -72,22 +93,11 @@
if project.name != request.matchdict.get("name", project.name):
return HTTPMovedPermanently(
request.current_route_path(name=project.name),
+ headers=_CORS_HEADERS
)
- # We want to allow CORS here to enable anyone to fetch data from this API
- request.response.headers["Access-Control-Allow-Origin"] = "*"
- request.response.headers["Access-Control-Allow-Headers"] = ", ".join([
- "Content-Type",
- "If-Match",
- "If-Modified-Since",
- "If-None-Match",
- "If-Unmodified-Since",
- ])
- request.response.headers["Access-Control-Allow-Methods"] = "GET"
- request.response.headers["Access-Control-Max-Age"] = "86400"
- request.response.headers["Access-Control-Expose-Headers"] = ", ".join([
- "X-PyPI-Last-Serial",
- ])
+ # Apply CORS headers.
+ request.response.headers.update(_CORS_HEADERS)
# Get the latest serial number for this project.
request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
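
The patch relies on Pyramid's HTTP exception classes accepting extra response headers at construction time, which is what lets the redirect and 404 responses carry the CORS headers. A minimal, stand-alone check of that behaviour (illustrative, outside Warehouse) could be:

```python
from pyramid.httpexceptions import HTTPMovedPermanently

cors = {"Access-Control-Allow-Origin": "*"}
redirect = HTTPMovedPermanently("/pypi/sampleproject/json", headers=cors)

print(redirect.status)                                  # 301 Moved Permanently
print(redirect.headers["Access-Control-Allow-Origin"])  # *
```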
|
{"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -20,6 +20,26 @@\n from warehouse.packaging.models import File, Release\n \n \n+# Generate appropriate CORS headers for the JSON endpoint.\n+# We want to allow Cross-Origin requests here so that users can interact\n+# with these endpoints via XHR/Fetch APIs in the browser.\n+_CORS_HEADERS = {\n+ \"Access-Control-Allow-Origin\": \"*\",\n+ \"Access-Control-Allow-Headers\": \", \".join([\n+ \"Content-Type\",\n+ \"If-Match\",\n+ \"If-Modified-Since\",\n+ \"If-None-Match\",\n+ \"If-Unmodified-Since\",\n+ ]),\n+ \"Access-Control-Allow-Methods\": \"GET\",\n+ \"Access-Control-Max-Age\": \"86400\", # 1 day.\n+ \"Access-Control-Expose-Headers\": \", \".join([\n+ \"X-PyPI-Last-Serial\",\n+ ]),\n+}\n+\n+\n @view_config(\n route_name=\"legacy.api.json.project\",\n renderer=\"json\",\n@@ -36,6 +56,7 @@\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n+ headers=_CORS_HEADERS\n )\n \n try:\n@@ -49,7 +70,7 @@\n .one()\n )\n except NoResultFound:\n- return HTTPNotFound()\n+ return HTTPNotFound(headers=_CORS_HEADERS)\n \n return json_release(release, request)\n \n@@ -72,22 +93,11 @@\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n+ headers=_CORS_HEADERS\n )\n \n- # We want to allow CORS here to enable anyone to fetch data from this API\n- request.response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n- request.response.headers[\"Access-Control-Allow-Headers\"] = \", \".join([\n- \"Content-Type\",\n- \"If-Match\",\n- \"If-Modified-Since\",\n- \"If-None-Match\",\n- \"If-Unmodified-Since\",\n- ])\n- request.response.headers[\"Access-Control-Allow-Methods\"] = \"GET\"\n- request.response.headers[\"Access-Control-Max-Age\"] = \"86400\"\n- request.response.headers[\"Access-Control-Expose-Headers\"] = \", \".join([\n- \"X-PyPI-Last-Serial\",\n- ])\n+ # Apply CORS headers.\n+ request.response.headers.update(_CORS_HEADERS)\n \n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n", "issue": "Missing Access-Control-Allow-Origin in redirect headers \nFrom https://sourceforge.net/p/pypi/support-requests/741/:\r\n\r\n> Hi,\r\n> \r\n> Both pypi.org and pypi.python.org do not have proper Access-Control-Allow-Origin response headers in their redirect responses. Please see https://stackoverflow.com/questions/44637138/case-sensitive-url-triggering-cors-error.\r\n> \r\n> It would be very helpful to add these to the headers so that redirect requests are propertly handled.\r\n> \r\n> Thanks!\r\n\r\n---\r\n\r\n**Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). 
If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Release\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound()\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n # We want to allow CORS here to enable anyone to fetch data from this API\n request.response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n request.response.headers[\"Access-Control-Allow-Headers\"] = \", \".join([\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ])\n request.response.headers[\"Access-Control-Allow-Methods\"] = \"GET\"\n request.response.headers[\"Access-Control-Max-Age\"] = \"86400\"\n request.response.headers[\"Access-Control-Expose-Headers\"] = \", \".join([\n \"X-PyPI-Last-Serial\",\n ])\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(Load(Release).load_only('version'))\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = 
{}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\n \"md5\": f.md5_digest,\n \"sha256\": f.sha256_digest,\n },\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\n \"last_day\": -1,\n \"last_week\": -1,\n \"last_month\": -1,\n },\n \"package_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"project_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"release_url\": request.route_url(\n \"packaging.release\",\n name=project.name,\n version=release.version,\n ),\n \"requires_dist\": (list(release.requires_dist)\n if release.requires_dist else None),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n }\n", "path": "warehouse/legacy/api/json.py"}]}
| 2,654 | 637 |
gh_patches_debug_1867
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-1390
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
1.0 RC 1
Hey all, if there aren't any reports of show-stopping bugs in 1.0b4, I'd like to put out a release candidate on Wednesday 6/27.
</issue>
<code>
[start of rasterio/__init__.py]
1 """Rasterio"""
2
3 from __future__ import absolute_import
4
5 from collections import namedtuple
6 from contextlib import contextmanager
7 import logging
8 import warnings
9
10 try:
11 from pathlib import Path
12 except ImportError: # pragma: no cover
13 class Path:
14 pass
15
16 try:
17 from logging import NullHandler
18 except ImportError: # pragma: no cover
19 class NullHandler(logging.Handler):
20 def emit(self, record):
21 pass
22
23 from rasterio._base import gdal_version
24 from rasterio.drivers import is_blacklisted
25 from rasterio.dtypes import (
26 bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,
27 complex_, check_dtype)
28 from rasterio.env import ensure_env_credentialled, Env
29 from rasterio.errors import RasterioIOError
30 from rasterio.compat import string_types
31 from rasterio.io import (
32 DatasetReader, get_writer_for_path, get_writer_for_driver, MemoryFile)
33 from rasterio.profiles import default_gtiff_profile
34 from rasterio.transform import Affine, guard_transform
35 from rasterio.path import parse_path
36
37 # These modules are imported from the Cython extensions, but are also import
38 # here to help tools like cx_Freeze find them automatically
39 import rasterio._err
40 import rasterio.coords
41 import rasterio.enums
42 import rasterio.path
43
44
45 __all__ = ['band', 'open', 'pad']
46 __version__ = "1.0b4"
47 __gdal_version__ = gdal_version()
48
49 # Rasterio attaches NullHandler to the 'rasterio' logger and its
50 # descendents. See
51 # https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
52 # Applications must attach their own handlers in order to see messages.
53 # See rasterio/rio/main.py for an example.
54 log = logging.getLogger(__name__)
55 log.addHandler(NullHandler())
56
57
58 @ensure_env_credentialled
59 def open(fp, mode='r', driver=None, width=None, height=None, count=None,
60 crs=None, transform=None, dtype=None, nodata=None, sharing=True,
61 **kwargs):
62 """Open a dataset for reading or writing.
63
64 The dataset may be located in a local file, in a resource located by
65 a URL, or contained within a stream of bytes.
66
67 In read ('r') or read/write ('r+') mode, no keyword arguments are
68 required: these attributes are supplied by the opened dataset.
69
70 In write ('w' or 'w+') mode, the driver, width, height, count, and dtype
71 keywords are strictly required.
72
73 Parameters
74 ----------
75 fp : str, file object or pathlib.Path object
76 A filename or URL, a file object opened in binary ('rb') mode,
77 or a Path object.
78 mode : str, optional
79 'r' (read, the default), 'r+' (read/write), 'w' (write), or
80 'w+' (write/read).
81 driver : str, optional
82 A short format driver name (e.g. "GTiff" or "JPEG") or a list of
83 such names (see GDAL docs at
84 http://www.gdal.org/formats_list.html). In 'w' or 'w+' modes
85 a single name is required. In 'r' or 'r+' modes the driver can
86 usually be omitted. Registered drivers will be tried
87 sequentially until a match is found. When multiple drivers are
88 available for a format such as JPEG2000, one of them can be
89 selected by using this keyword argument.
90 width, height : int, optional
91 The numbers of rows and columns of the raster dataset. Required
92 in 'w' or 'w+' modes, they are ignored in 'r' or 'r+' modes.
93 count : int, optional
94 The count of dataset bands. Required in 'w' or 'w+' modes, it is
95 ignored in 'r' or 'r+' modes.
96 dtype : str or numpy dtype
97 The data type for bands. For example: 'uint8' or
98 ``rasterio.uint16``. Required in 'w' or 'w+' modes, it is
99 ignored in 'r' or 'r+' modes.
100 crs : str, dict, or CRS; optional
101 The coordinate reference system. Required in 'w' or 'w+' modes,
102 it is ignored in 'r' or 'r+' modes.
103 transform : Affine instance, optional
104 Affine transformation mapping the pixel space to geographic
105 space. Required in 'w' or 'w+' modes, it is ignored in 'r' or
106 'r+' modes.
107 nodata : int, float, or nan; optional
108 Defines the pixel value to be interpreted as not valid data.
109 Required in 'w' or 'w+' modes, it is ignored in 'r' or 'r+'
110 modes.
111 sharing : bool
112 A flag that allows sharing of dataset handles. Default is
113 `True`. Should be set to `False` in a multithreaded program.
114 kwargs : optional
115 These are passed to format drivers as directives for creating or
116 interpreting datasets. For example: in 'w' or 'w+' modes
117 a `tiled=True` keyword argument will direct the GeoTIFF format
118 driver to create a tiled, rather than striped, TIFF.
119
120 Returns
121 -------
122 A ``DatasetReader`` or ``DatasetUpdater`` object.
123
124 Examples
125 --------
126
127 To open a GeoTIFF for reading using standard driver discovery and
128 no directives:
129
130 >>> import rasterio
131 >>> with rasterio.open('example.tif') as dataset:
132 ... print(dataset.profile)
133
134 To open a JPEG2000 using only the JP2OpenJPEG driver:
135
136 >>> with rasterio.open(
137 ... 'example.jp2', driver='JP2OpenJPEG') as dataset:
138 ... print(dataset.profile)
139
140 To create a new 8-band, 16-bit unsigned, tiled, and LZW-compressed
141 GeoTIFF with a global extent and 0.5 degree resolution:
142
143 >>> from rasterio.transform import from_origin
144 >>> with rasterio.open(
145 ... 'example.tif', 'w', driver='GTiff', dtype='uint16',
146 ... width=720, height=360, count=8, crs='EPSG:4326',
147 ... transform=from_origin(-180.0, 90.0, 0.5, 0.5),
148 ... nodata=0, tiled=True, compress='lzw') as dataset:
149 ... dataset.write(...)
150 """
151
152 if not isinstance(fp, string_types):
153 if not (hasattr(fp, 'read') or hasattr(fp, 'write') or isinstance(fp, Path)):
154 raise TypeError("invalid path or file: {0!r}".format(fp))
155 if mode and not isinstance(mode, string_types):
156 raise TypeError("invalid mode: {0!r}".format(mode))
157 if driver and not isinstance(driver, string_types):
158 raise TypeError("invalid driver: {0!r}".format(driver))
159 if dtype and not check_dtype(dtype):
160 raise TypeError("invalid dtype: {0!r}".format(dtype))
161 if nodata is not None:
162 nodata = float(nodata)
163 if transform:
164 transform = guard_transform(transform)
165
166 # Check driver/mode blacklist.
167 if driver and is_blacklisted(driver, mode):
168 raise RasterioIOError(
169 "Blacklisted: file cannot be opened by "
170 "driver '{0}' in '{1}' mode".format(driver, mode))
171
172 # Special case for file object argument.
173 if mode == 'r' and hasattr(fp, 'read'):
174
175 @contextmanager
176 def fp_reader(fp):
177 memfile = MemoryFile(fp.read())
178 dataset = memfile.open()
179 try:
180 yield dataset
181 finally:
182 dataset.close()
183 memfile.close()
184
185 return fp_reader(fp)
186
187 elif mode in ('w', 'w+') and hasattr(fp, 'write'):
188
189 @contextmanager
190 def fp_writer(fp):
191 memfile = MemoryFile()
192 dataset = memfile.open(driver=driver, width=width, height=height,
193 count=count, crs=crs, transform=transform,
194 dtype=dtype, nodata=nodata, **kwargs)
195 try:
196 yield dataset
197 finally:
198 dataset.close()
199 memfile.seek(0)
200 fp.write(memfile.read())
201 memfile.close()
202
203 return fp_writer(fp)
204
205 else:
206 # If a pathlib.Path instance is given, convert it to a string path.
207 if isinstance(fp, Path):
208 fp = str(fp)
209
210 # The 'normal' filename or URL path.
211 path = parse_path(fp)
212
213 # Create dataset instances and pass the given env, which will
214 # be taken over by the dataset's context manager if it is not
215 # None.
216 if mode == 'r':
217 s = DatasetReader(path, driver=driver, **kwargs)
218 elif mode == 'r+':
219 s = get_writer_for_path(path)(path, mode, driver=driver, **kwargs)
220 elif mode.startswith("w"):
221 s = get_writer_for_driver(driver)(path, mode, driver=driver,
222 width=width, height=height,
223 count=count, crs=crs,
224 transform=transform,
225 dtype=dtype, nodata=nodata,
226 **kwargs)
227 else:
228 raise ValueError(
229 "mode must be one of 'r', 'r+', or 'w', not %s" % mode)
230 return s
231
232
233 Band = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])
234
235
236 def band(ds, bidx):
237 """A dataset and one or more of its bands
238
239 Parameters
240 ----------
241 ds: dataset object
242 An opened rasterio dataset object.
243 bidx: int or sequence of ints
244 Band number(s), index starting at 1.
245
246 Returns
247 -------
248 rasterio.Band
249 """
250 return Band(ds, bidx, set(ds.dtypes).pop(), ds.shape)
251
252
253 def pad(array, transform, pad_width, mode=None, **kwargs):
254 """pad array and adjust affine transform matrix.
255
256 Parameters
257 ----------
258 array: ndarray
259 Numpy ndarray, for best results a 2D array
260 transform: Affine transform
261 transform object mapping pixel space to coordinates
262 pad_width: int
263 number of pixels to pad array on all four
264 mode: str or function
265 define the method for determining padded values
266
267 Returns
268 -------
269 (array, transform): tuple
270 Tuple of new array and affine transform
271
272 Notes
273 -----
274 See numpy docs for details on mode and other kwargs:
275 http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html
276 """
277 import numpy as np
278 transform = guard_transform(transform)
279 padded_array = np.pad(array, pad_width, mode, **kwargs)
280 padded_trans = list(transform)
281 padded_trans[2] -= pad_width * padded_trans[0]
282 padded_trans[5] -= pad_width * padded_trans[4]
283 return padded_array, Affine(*padded_trans[:6])
284
[end of rasterio/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rasterio/__init__.py b/rasterio/__init__.py
--- a/rasterio/__init__.py
+++ b/rasterio/__init__.py
@@ -42,8 +42,8 @@
import rasterio.path
-__all__ = ['band', 'open', 'pad']
-__version__ = "1.0b4"
+__all__ = ['band', 'open', 'pad', 'Env']
+__version__ = "1.0rc1"
__gdal_version__ = gdal_version()
# Rasterio attaches NullHandler to the 'rasterio' logger and its
|
{"golden_diff": "diff --git a/rasterio/__init__.py b/rasterio/__init__.py\n--- a/rasterio/__init__.py\n+++ b/rasterio/__init__.py\n@@ -42,8 +42,8 @@\n import rasterio.path\n \n \n-__all__ = ['band', 'open', 'pad']\n-__version__ = \"1.0b4\"\n+__all__ = ['band', 'open', 'pad', 'Env']\n+__version__ = \"1.0rc1\"\n __gdal_version__ = gdal_version()\n \n # Rasterio attaches NullHandler to the 'rasterio' logger and its\n", "issue": "1.0 RC 1\nHey all, if there aren't any reports of show-stopping bugs in 1.0b4, I'd like to put out a release candidate on Wednesday 6/27.\n", "before_files": [{"content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nimport logging\nimport warnings\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n class Path:\n pass\n\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nfrom rasterio._base import gdal_version\nfrom rasterio.drivers import is_blacklisted\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_, check_dtype)\nfrom rasterio.env import ensure_env_credentialled, Env\nfrom rasterio.errors import RasterioIOError\nfrom rasterio.compat import string_types\nfrom rasterio.io import (\n DatasetReader, get_writer_for_path, get_writer_for_driver, MemoryFile)\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\nfrom rasterio.path import parse_path\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nimport rasterio._err\nimport rasterio.coords\nimport rasterio.enums\nimport rasterio.path\n\n\n__all__ = ['band', 'open', 'pad']\n__version__ = \"1.0b4\"\n__gdal_version__ = gdal_version()\n\n# Rasterio attaches NullHandler to the 'rasterio' logger and its\n# descendents. See\n# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library\n# Applications must attach their own handlers in order to see messages.\n# See rasterio/rio/main.py for an example.\nlog = logging.getLogger(__name__)\nlog.addHandler(NullHandler())\n\n\n@ensure_env_credentialled\ndef open(fp, mode='r', driver=None, width=None, height=None, count=None,\n crs=None, transform=None, dtype=None, nodata=None, sharing=True,\n **kwargs):\n \"\"\"Open a dataset for reading or writing.\n\n The dataset may be located in a local file, in a resource located by\n a URL, or contained within a stream of bytes.\n\n In read ('r') or read/write ('r+') mode, no keyword arguments are\n required: these attributes are supplied by the opened dataset.\n\n In write ('w' or 'w+') mode, the driver, width, height, count, and dtype\n keywords are strictly required.\n\n Parameters\n ----------\n fp : str, file object or pathlib.Path object\n A filename or URL, a file object opened in binary ('rb') mode,\n or a Path object.\n mode : str, optional\n 'r' (read, the default), 'r+' (read/write), 'w' (write), or\n 'w+' (write/read).\n driver : str, optional\n A short format driver name (e.g. \"GTiff\" or \"JPEG\") or a list of\n such names (see GDAL docs at\n http://www.gdal.org/formats_list.html). In 'w' or 'w+' modes\n a single name is required. In 'r' or 'r+' modes the driver can\n usually be omitted. Registered drivers will be tried\n sequentially until a match is found. 
When multiple drivers are\n available for a format such as JPEG2000, one of them can be\n selected by using this keyword argument.\n width, height : int, optional\n The numbers of rows and columns of the raster dataset. Required\n in 'w' or 'w+' modes, they are ignored in 'r' or 'r+' modes.\n count : int, optional\n The count of dataset bands. Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n dtype : str or numpy dtype\n The data type for bands. For example: 'uint8' or\n ``rasterio.uint16``. Required in 'w' or 'w+' modes, it is\n ignored in 'r' or 'r+' modes.\n crs : str, dict, or CRS; optional\n The coordinate reference system. Required in 'w' or 'w+' modes,\n it is ignored in 'r' or 'r+' modes.\n transform : Affine instance, optional\n Affine transformation mapping the pixel space to geographic\n space. Required in 'w' or 'w+' modes, it is ignored in 'r' or\n 'r+' modes.\n nodata : int, float, or nan; optional\n Defines the pixel value to be interpreted as not valid data.\n Required in 'w' or 'w+' modes, it is ignored in 'r' or 'r+'\n modes.\n sharing : bool\n A flag that allows sharing of dataset handles. Default is\n `True`. Should be set to `False` in a multithreaded:w program.\n kwargs : optional\n These are passed to format drivers as directives for creating or\n interpreting datasets. For example: in 'w' or 'w+' modes\n a `tiled=True` keyword argument will direct the GeoTIFF format\n driver to create a tiled, rather than striped, TIFF.\n\n Returns\n -------\n A ``DatasetReader`` or ``DatasetUpdater`` object.\n\n Examples\n --------\n\n To open a GeoTIFF for reading using standard driver discovery and\n no directives:\n\n >>> import rasterio\n >>> with rasterio.open('example.tif') as dataset:\n ... print(dataset.profile)\n\n To open a JPEG2000 using only the JP2OpenJPEG driver:\n\n >>> with rasterio.open(\n ... 'example.jp2', driver='JP2OpenJPEG') as dataset:\n ... print(dataset.profile)\n\n To create a new 8-band, 16-bit unsigned, tiled, and LZW-compressed\n GeoTIFF with a global extent and 0.5 degree resolution:\n\n >>> from rasterio.transform import from_origin\n >>> with rasterio.open(\n ... 'example.tif', 'w', driver='GTiff', dtype='uint16',\n ... width=720, height=360, count=8, crs='EPSG:4326',\n ... transform=from_origin(-180.0, 90.0, 0.5, 0.5),\n ... nodata=0, tiled=True, compress='lzw') as dataset:\n ... 
dataset.write(...)\n \"\"\"\n\n if not isinstance(fp, string_types):\n if not (hasattr(fp, 'read') or hasattr(fp, 'write') or isinstance(fp, Path)):\n raise TypeError(\"invalid path or file: {0!r}\".format(fp))\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: {0!r}\".format(mode))\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: {0!r}\".format(driver))\n if dtype and not check_dtype(dtype):\n raise TypeError(\"invalid dtype: {0!r}\".format(dtype))\n if nodata is not None:\n nodata = float(nodata)\n if transform:\n transform = guard_transform(transform)\n\n # Check driver/mode blacklist.\n if driver and is_blacklisted(driver, mode):\n raise RasterioIOError(\n \"Blacklisted: file cannot be opened by \"\n \"driver '{0}' in '{1}' mode\".format(driver, mode))\n\n # Special case for file object argument.\n if mode == 'r' and hasattr(fp, 'read'):\n\n @contextmanager\n def fp_reader(fp):\n memfile = MemoryFile(fp.read())\n dataset = memfile.open()\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.close()\n\n return fp_reader(fp)\n\n elif mode in ('w', 'w+') and hasattr(fp, 'write'):\n\n @contextmanager\n def fp_writer(fp):\n memfile = MemoryFile()\n dataset = memfile.open(driver=driver, width=width, height=height,\n count=count, crs=crs, transform=transform,\n dtype=dtype, nodata=nodata, **kwargs)\n try:\n yield dataset\n finally:\n dataset.close()\n memfile.seek(0)\n fp.write(memfile.read())\n memfile.close()\n\n return fp_writer(fp)\n\n else:\n # If a pathlib.Path instance is given, convert it to a string path.\n if isinstance(fp, Path):\n fp = str(fp)\n\n # The 'normal' filename or URL path.\n path = parse_path(fp)\n\n # Create dataset instances and pass the given env, which will\n # be taken over by the dataset's context manager if it is not\n # None.\n if mode == 'r':\n s = DatasetReader(path, driver=driver, **kwargs)\n elif mode == 'r+':\n s = get_writer_for_path(path)(path, mode, driver=driver, **kwargs)\n elif mode.startswith(\"w\"):\n s = get_writer_for_driver(driver)(path, mode, driver=driver,\n width=width, height=height,\n count=count, crs=crs,\n transform=transform,\n dtype=dtype, nodata=nodata,\n **kwargs)\n else:\n raise ValueError(\n \"mode must be one of 'r', 'r+', or 'w', not %s\" % mode)\n return s\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\n\ndef band(ds, bidx):\n \"\"\"A dataset and one or more of its bands\n\n Parameters\n ----------\n ds: dataset object\n An opened rasterio dataset object.\n bidx: int or sequence of ints\n Band number(s), index starting at 1.\n\n Returns\n -------\n rasterio.Band\n \"\"\"\n return Band(ds, bidx, set(ds.dtypes).pop(), ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"pad array and adjust affine transform matrix.\n\n Parameters\n ----------\n array: ndarray\n Numpy ndarray, for best results a 2D array\n transform: Affine transform\n transform object mapping pixel space to coordinates\n pad_width: int\n number of pixels to pad array on all four\n mode: str or function\n define the method for determining padded values\n\n Returns\n -------\n (array, transform): tuple\n Tuple of new array and affine transform\n\n Notes\n -----\n See numpy docs for details on mode and other kwargs:\n http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html\n \"\"\"\n import numpy as np\n transform = guard_transform(transform)\n padded_array = np.pad(array, pad_width, mode, **kwargs)\n padded_trans = 
list(transform)\n padded_trans[2] -= pad_width * padded_trans[0]\n padded_trans[5] -= pad_width * padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n", "path": "rasterio/__init__.py"}]}
| 3,806 | 142 |
gh_patches_debug_851
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-1893
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'pwn cyclic -o afca' throws a BytesWarning
```
$ pwn cyclic -o afca
/Users/heapcrash/pwntools/pwnlib/commandline/cyclic.py:74: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes
pat = flat(pat, bytes=args.length)
506
```
</issue>
<code>
[start of pwnlib/commandline/cyclic.py]
1 #!/usr/bin/env python2
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import argparse
6 import six
7 import string
8 import sys
9
10 import pwnlib.args
11 pwnlib.args.free_form = False
12
13 from pwn import *
14 from pwnlib.commandline import common
15
16 parser = common.parser_commands.add_parser(
17 'cyclic',
18 help = "Cyclic pattern creator/finder",
19 description = "Cyclic pattern creator/finder"
20 )
21
22 parser.add_argument(
23 '-a', '--alphabet',
24 metavar = 'alphabet',
25 default = string.ascii_lowercase.encode(),
26 type = packing._encode,
27 help = 'The alphabet to use in the cyclic pattern (defaults to all lower case letters)',
28 )
29
30 parser.add_argument(
31 '-n', '--length',
32 metavar = 'length',
33 default = 4,
34 type = int,
35 help = 'Size of the unique subsequences (defaults to 4).'
36 )
37
38 parser.add_argument(
39 '-c', '--context',
40 metavar = 'context',
41 action = 'append',
42 type = common.context_arg,
43 choices = common.choices,
44 help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,
45 )
46
47 group = parser.add_mutually_exclusive_group(required=False)
48 group.add_argument(
49 '-l', '-o', '--offset', '--lookup',
50 dest = 'lookup',
51 metavar = 'lookup_value',
52 help = 'Do a lookup instead printing the alphabet',
53 )
54
55 group.add_argument(
56 'count',
57 type=int,
58 nargs='?',
59 default=None,
60 help='Number of characters to print'
61 )
62
63 def main(args):
64 alphabet = args.alphabet
65 subsize = args.length
66
67 if args.lookup:
68 pat = args.lookup
69
70 try:
71 pat = int(pat, 0)
72 except ValueError:
73 pass
74 pat = flat(pat, bytes=args.length)
75
76 if len(pat) != subsize:
77 log.critical('Subpattern must be %d bytes' % subsize)
78 sys.exit(1)
79
80 if not all(c in alphabet for c in pat):
81 log.critical('Pattern contains characters not present in the alphabet')
82 sys.exit(1)
83
84 offset = cyclic_find(pat, alphabet, subsize)
85
86 if offset == -1:
87 log.critical('Given pattern does not exist in cyclic pattern')
88 sys.exit(1)
89 else:
90 print(offset)
91 else:
92 want = args.count
93 result = cyclic(want, alphabet, subsize)
94 got = len(result)
95 if want is not None and got < want:
96 log.failure("Alphabet too small (max length = %i)" % got)
97
98 out = getattr(sys.stdout, 'buffer', sys.stdout)
99 out.write(result)
100
101 if out.isatty():
102 out.write(b'\n')
103
104 if __name__ == '__main__':
105 pwnlib.commandline.common.main(__file__)
106
[end of pwnlib/commandline/cyclic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwnlib/commandline/cyclic.py b/pwnlib/commandline/cyclic.py
--- a/pwnlib/commandline/cyclic.py
+++ b/pwnlib/commandline/cyclic.py
@@ -67,6 +67,9 @@
if args.lookup:
pat = args.lookup
+ if six.PY3:
+ pat = bytes(pat, encoding='utf-8')
+
try:
pat = int(pat, 0)
except ValueError:
|
{"golden_diff": "diff --git a/pwnlib/commandline/cyclic.py b/pwnlib/commandline/cyclic.py\n--- a/pwnlib/commandline/cyclic.py\n+++ b/pwnlib/commandline/cyclic.py\n@@ -67,6 +67,9 @@\n if args.lookup:\n pat = args.lookup\n \n+ if six.PY3:\n+ pat = bytes(pat, encoding='utf-8')\n+\n try:\n pat = int(pat, 0)\n except ValueError:\n", "issue": "'pwn cyclic -o afca' throws a BytesWarning\n\r\n```\r\n$ pwn cyclic -o afca\r\n/Users/heapcrash/pwntools/pwnlib/commandline/cyclic.py:74: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes\r\n pat = flat(pat, bytes=args.length)\r\n506\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport six\nimport string\nimport sys\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nparser = common.parser_commands.add_parser(\n 'cyclic',\n help = \"Cyclic pattern creator/finder\",\n description = \"Cyclic pattern creator/finder\"\n)\n\nparser.add_argument(\n '-a', '--alphabet',\n metavar = 'alphabet',\n default = string.ascii_lowercase.encode(),\n type = packing._encode,\n help = 'The alphabet to use in the cyclic pattern (defaults to all lower case letters)',\n)\n\nparser.add_argument(\n '-n', '--length',\n metavar = 'length',\n default = 4,\n type = int,\n help = 'Size of the unique subsequences (defaults to 4).'\n)\n\nparser.add_argument(\n '-c', '--context',\n metavar = 'context',\n action = 'append',\n type = common.context_arg,\n choices = common.choices,\n help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,\n)\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n '-l', '-o', '--offset', '--lookup',\n dest = 'lookup',\n metavar = 'lookup_value',\n help = 'Do a lookup instead printing the alphabet',\n)\n\ngroup.add_argument(\n 'count',\n type=int,\n nargs='?',\n default=None,\n help='Number of characters to print'\n)\n\ndef main(args):\n alphabet = args.alphabet\n subsize = args.length\n\n if args.lookup:\n pat = args.lookup\n\n try:\n pat = int(pat, 0)\n except ValueError:\n pass\n pat = flat(pat, bytes=args.length)\n\n if len(pat) != subsize:\n log.critical('Subpattern must be %d bytes' % subsize)\n sys.exit(1)\n\n if not all(c in alphabet for c in pat):\n log.critical('Pattern contains characters not present in the alphabet')\n sys.exit(1)\n\n offset = cyclic_find(pat, alphabet, subsize)\n\n if offset == -1:\n log.critical('Given pattern does not exist in cyclic pattern')\n sys.exit(1)\n else:\n print(offset)\n else:\n want = args.count\n result = cyclic(want, alphabet, subsize)\n got = len(result)\n if want is not None and got < want:\n log.failure(\"Alphabet too small (max length = %i)\" % got)\n\n out = getattr(sys.stdout, 'buffer', sys.stdout)\n out.write(result)\n\n if out.isatty():\n out.write(b'\\n')\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/cyclic.py"}]}
| 1,496 | 105 |
gh_patches_debug_53094
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-926
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make --host a required switch
`--host` is currently optional, and defaults to `localhost`. The old behavior was to default to `0.0.0.0`, which is not a particularly sane default. However, the new default makes things confusing, since it is applied silently - things just work differently. Changing the switch to be explicit solves that problem, while also forcing the user to consider the security implications of either choice.
</issue>
<code>
[start of ptvsd/__main__.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import argparse
6 import os.path
7 import sys
8
9 from ptvsd._attach import attach_main
10 from ptvsd._local import debug_main, run_main
11 from ptvsd.socket import Address
12 from ptvsd.version import __version__, __author__ # noqa
13
14
15 ##################################
16 # the script
17
18 """
19 For the PyDevd CLI handling see:
20
21 https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py
22 https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)
23 """ # noqa
24
25 PYDEVD_OPTS = {
26 '--file',
27 '--vm_type',
28 }
29
30 PYDEVD_FLAGS = {
31 '--DEBUG',
32 '--DEBUG_RECORD_SOCKET_READS',
33 '--cmd-line',
34 '--module',
35 '--multiproc',
36 '--multiprocess',
37 '--print-in-debugger-startup',
38 '--save-signatures',
39 '--save-threading',
40 '--save-asyncio',
41 '--server',
42 '--qt-support=auto',
43 }
44
45 USAGE = """
46 {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]
47 {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]
48 {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
49 """ # noqa
50
51
52 def parse_args(argv=None):
53 """Return the parsed args to use in main()."""
54 if argv is None:
55 argv = sys.argv
56 prog = argv[0]
57 if prog == __file__:
58 prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))
59 else:
60 prog = argv[0]
61 argv = argv[1:]
62
63 supported, pydevd, script = _group_args(argv)
64 args = _parse_args(prog, supported)
65 # '--' is used in _run_args to extract pydevd specific args
66 extra = pydevd + ['--']
67 if script:
68 extra += script
69 return args, extra
70
71
72 def _group_args(argv):
73 supported = []
74 pydevd = []
75 script = []
76
77 try:
78 pos = argv.index('--')
79 except ValueError:
80 script = []
81 else:
82 script = argv[pos + 1:]
83 argv = argv[:pos]
84
85 for arg in argv:
86 if arg == '-h' or arg == '--help':
87 return argv, [], script
88
89 gottarget = False
90 skip = 0
91 for i in range(len(argv)):
92 if skip:
93 skip -= 1
94 continue
95
96 arg = argv[i]
97 try:
98 nextarg = argv[i + 1]
99 except IndexError:
100 nextarg = None
101
102 # TODO: Deprecate the PyDevd arg support.
103 # PyDevd support
104 if gottarget:
105 script = argv[i:] + script
106 break
107 if arg == '--file':
108 if nextarg is None: # The filename is missing...
109 pydevd.append(arg)
110 continue # This will get handled later.
111 if nextarg.endswith(':') and '--module' in pydevd:
112 pydevd.remove('--module')
113 arg = '-m'
114 argv[i + 1] = nextarg = nextarg[:-1]
115 else:
116 arg = nextarg
117 skip += 1
118
119 if arg in PYDEVD_OPTS:
120 pydevd.append(arg)
121 if nextarg is not None:
122 pydevd.append(nextarg)
123 skip += 1
124 elif arg in PYDEVD_FLAGS:
125 pydevd.append(arg)
126 elif arg == '--nodebug':
127 supported.append(arg)
128
129 # ptvsd support
130 elif arg in ('--host', '--port', '--pid', '-m'):
131 if arg == '-m' or arg == '--pid':
132 gottarget = True
133 supported.append(arg)
134 if nextarg is not None:
135 supported.append(nextarg)
136 skip += 1
137 elif arg in ('--single-session', '--wait', '--client'):
138 supported.append(arg)
139 elif not arg.startswith('-'):
140 supported.append(arg)
141 gottarget = True
142
143 # unsupported arg
144 else:
145 supported.append(arg)
146 break
147
148 return supported, pydevd, script
149
150
151 def _parse_args(prog, argv):
152 parser = argparse.ArgumentParser(
153 prog=prog,
154 usage=USAGE.format(prog),
155 )
156
157 parser.add_argument('--nodebug', action='store_true')
158 parser.add_argument('--client', action='store_true')
159
160 parser.add_argument('--host')
161 parser.add_argument('--port', type=int, required=True)
162
163 target = parser.add_mutually_exclusive_group(required=True)
164 target.add_argument('-m', dest='module')
165 target.add_argument('--pid', type=int)
166 target.add_argument('filename', nargs='?')
167
168 parser.add_argument('--single-session', action='store_true')
169 parser.add_argument('--wait', action='store_true')
170
171 parser.add_argument('-V', '--version', action='version')
172 parser.version = __version__
173
174 args = parser.parse_args(argv)
175 ns = vars(args)
176
177 host = ns.pop('host', None)
178 port = ns.pop('port')
179 client = ns.pop('client')
180 args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa
181
182 pid = ns.pop('pid')
183 module = ns.pop('module')
184 filename = ns.pop('filename')
185 if pid is not None:
186 args.name = pid
187 args.kind = 'pid'
188 elif module is not None:
189 args.name = module
190 args.kind = 'module'
191 else:
192 args.name = filename
193 args.kind = 'script'
194
195 return args
196
197
198 def handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):
199 if kind == 'pid':
200 attach_main(addr, name, *extra, **kwargs)
201 elif nodebug:
202 run_main(addr, name, kind, *extra, **kwargs)
203 else:
204 debug_main(addr, name, kind, *extra, **kwargs)
205
206
207 def main(argv=None):
208 args, extra = parse_args(argv)
209 handle_args(args.address, args.name, args.kind, extra,
210 nodebug=args.nodebug, singlesession=args.single_session,
211 wait=args.wait)
212
213
214 if __name__ == '__main__':
215 main()
216
[end of ptvsd/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py
--- a/ptvsd/__main__.py
+++ b/ptvsd/__main__.py
@@ -157,7 +157,7 @@
parser.add_argument('--nodebug', action='store_true')
parser.add_argument('--client', action='store_true')
- parser.add_argument('--host')
+ parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int, required=True)
target = parser.add_mutually_exclusive_group(required=True)
|
{"golden_diff": "diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py\n--- a/ptvsd/__main__.py\n+++ b/ptvsd/__main__.py\n@@ -157,7 +157,7 @@\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n \n- parser.add_argument('--host')\n+ parser.add_argument('--host', required=True)\n parser.add_argument('--port', type=int, required=True)\n \n target = parser.add_mutually_exclusive_group(required=True)\n", "issue": "Make --host a required switch\n`--host` is currently optional, and defaults to `localhost`. The old behavior was to default to `0.0.0.0`, which is not a particularly sane default. However, the new default makes things confusing, since it is applied silently - things just work differently. Changing the switch to be explicit solves that problem, while also forcing the user to consider the security implications of either choice.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n 
arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n\n parser.add_argument('--host')\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}]}
| 2,634 | 130 |
gh_patches_debug_14472
|
rasdani/github-patches
|
git_diff
|
mytardis__mytardis-1507
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NPM_FILE_PATTERNS - collectstatic failing on Windows
MyTardis currently uses `django-npm` to collect static content which has been npm installed into the `node_modules/` folder.
Because we don't necessarily want to copy everything from `node_modules/` when running `collectstatic`, MyTardis uses django-npm's `NPM_FILE_PATTERNS` setting in `tardis/default_settings/static_files.py`
See: https://github.com/kevin1024/django-npm#configuration
It can be used like this:
```
NPM_FILE_PATTERNS = {
'jquery': ['*'],
'jquery-migrate': ['*'],
}
```
to copy everything within `node_modules/jquery/` and everything within `node_modules/jquery-migrate/` into the static folder written to by `collectstatic`.
If you only want `collectstatic` to copy a subset of the files in the node_modules folder, `django-npm` provides the ability to use glob patterns like this:
```
NPM_FILE_PATTERNS = {
'bootstrap': ['dist/*'],
'font-awesome': ['css/*', 'fonts/*'],
}
```
However, these glob patterns don't seem to work on Windows, i.e. nothing is copied from the `node_modules` folders which have glob patterns more complex than `['*']`, see: https://github.com/kevin1024/django-npm/issues/15
A workaround (when running MyTardis's `collectstatic` on Windows) is to redefine `NPM_FILE_PATTERNS` in your `tardis/settings.py`, and just use the `['*']` pattern for every node module you want to be copied by `collectstatic`.
</issue>
<code>
[start of tardis/default_settings/static_files.py]
1 from os import path
2 from .storage import DEFAULT_STORAGE_BASE_DIR
3
4 # Absolute path to the directory that holds media.
5 # Example: "/home/media/media.lawrence.com/"
6 MEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR
7
8 # Used by "django collectstatic"
9 STATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '../..', 'static'))
10
11 # Use cachable copies of static files
12 STATICFILES_STORAGE = \
13 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
14
15 STATICFILES_FINDERS = (
16 'django.contrib.staticfiles.finders.FileSystemFinder',
17 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
18 'npm.finders.NpmFinder',
19 )
20
21 # django-npm settings:
22 NPM_ROOT_PATH = path.abspath(path.join(path.dirname(__file__), '../..'))
23
24 # If you have run "npm install", rather than "npm install --production",
25 # you will get a lot of devDependencies installed in node_modules/ which
26 # are only needed for development/testing (e.g. "npm test") and don't
27 # need to be copied when running collectstatic. NPM_FILE_PATTERNS
28 # specifies the folders within node_modules/ which do need to be copied:
29 NPM_FILE_PATTERNS = {
30 'angular': ['*'],
31 'angular-resource': ['*'],
32 'backbone': ['*'],
33 'backbone-forms': ['*'],
34 'blueimp-file-upload': ['*'],
35 'bootstrap': ['dist/*'],
36 'bootstrap-3-typeahead': ['*'],
37 'clipboard': ['*'],
38 'font-awesome': ['css/*', 'fonts/*'],
39 'jquery': ['*'],
40 'jquery-migrate': ['*'],
41 'jquery-ui-dist': ['jquery-ui.min.js'],
42 'mustache': ['mustache.min.js'],
43 'ng-dialog': ['*'],
44 'sprintf-js': ['dist/*'],
45 'underscore': ['*'],
46 'underscore.string': ['dist/*']
47 }
48
[end of tardis/default_settings/static_files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tardis/default_settings/static_files.py b/tardis/default_settings/static_files.py
--- a/tardis/default_settings/static_files.py
+++ b/tardis/default_settings/static_files.py
@@ -32,16 +32,16 @@
'backbone': ['*'],
'backbone-forms': ['*'],
'blueimp-file-upload': ['*'],
- 'bootstrap': ['dist/*'],
+ 'bootstrap': ['*'],
'bootstrap-3-typeahead': ['*'],
'clipboard': ['*'],
- 'font-awesome': ['css/*', 'fonts/*'],
+ 'font-awesome': ['*'],
'jquery': ['*'],
'jquery-migrate': ['*'],
'jquery-ui-dist': ['jquery-ui.min.js'],
'mustache': ['mustache.min.js'],
'ng-dialog': ['*'],
- 'sprintf-js': ['dist/*'],
+ 'sprintf-js': ['*'],
'underscore': ['*'],
- 'underscore.string': ['dist/*']
+ 'underscore.string': ['*']
}
|
{"golden_diff": "diff --git a/tardis/default_settings/static_files.py b/tardis/default_settings/static_files.py\n--- a/tardis/default_settings/static_files.py\n+++ b/tardis/default_settings/static_files.py\n@@ -32,16 +32,16 @@\n 'backbone': ['*'],\n 'backbone-forms': ['*'],\n 'blueimp-file-upload': ['*'],\n- 'bootstrap': ['dist/*'],\n+ 'bootstrap': ['*'],\n 'bootstrap-3-typeahead': ['*'],\n 'clipboard': ['*'],\n- 'font-awesome': ['css/*', 'fonts/*'],\n+ 'font-awesome': ['*'],\n 'jquery': ['*'],\n 'jquery-migrate': ['*'],\n 'jquery-ui-dist': ['jquery-ui.min.js'],\n 'mustache': ['mustache.min.js'],\n 'ng-dialog': ['*'],\n- 'sprintf-js': ['dist/*'],\n+ 'sprintf-js': ['*'],\n 'underscore': ['*'],\n- 'underscore.string': ['dist/*']\n+ 'underscore.string': ['*']\n }\n", "issue": "NPM_FILE_PATTERNS - collectstatic failing on Windows\nMyTardis currently uses `django-npm` to collect static content which has been npm installed into the `node_modules/` folder.\r\n\r\nBecause we don't necessarily want to copy everything from `node_modules/` when running `collectstatic`, MyTardis uses django-npm's `NPM_FILE_PATTERNS` setting in `tardis/default_settings/static_files.py`\r\n\r\nSee: https://github.com/kevin1024/django-npm#configuration\r\n\r\nIt can be used like this:\r\n\r\n```\r\nNPM_FILE_PATTERNS = {\r\n 'jquery': ['*'],\r\n 'jquery-migrate': ['*'],\r\n}\r\n```\r\n\r\nto copy everything within `node_modules/jquery/` and everything within `node_modules/jquery-migrate/` into the static folder written to by `collectstatic`.\r\n\r\nIf you only want `collectstatic` to copy a subset of the files in the node_modules folder, `django-npm` provides the ability to use glob patterns like this:\r\n\r\n```\r\nNPM_FILE_PATTERNS = {\r\n 'bootstrap': ['dist/*'],\r\n 'font-awesome': ['css/*', 'fonts/*'],\r\n}\r\n```\r\n\r\nHowever, these glob patterns don't seem to work on Windows, i.e. nothing is copied from the `node_modules` folders which have glob patterns more complex than `['*']`, see: https://github.com/kevin1024/django-npm/issues/15\r\n\r\nA workaround (when running MyTardis's `collectstatic` on Windows) is to redefine `NPM_FILE_PATTERNS` in your `tardis/settings.py`, and just use the `['*']` pattern for every node module you want to be copied by `collectstatic`.\n", "before_files": [{"content": "from os import path\nfrom .storage import DEFAULT_STORAGE_BASE_DIR\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR\n\n# Used by \"django collectstatic\"\nSTATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '../..', 'static'))\n\n# Use cachable copies of static files\nSTATICFILES_STORAGE = \\\n 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'npm.finders.NpmFinder',\n)\n\n# django-npm settings:\nNPM_ROOT_PATH = path.abspath(path.join(path.dirname(__file__), '../..'))\n\n# If you have run \"npm install\", rather than \"npm install --production\",\n# you will get a lot of devDependencies installed in node_modules/ which\n# are only needed for development/testing (e.g. \"npm test\") and don't\n# need to be copied when running collectstatic. 
NPM_FILE_PATTERNS\n# specifies the folders within node_modules/ which do need to be copied:\nNPM_FILE_PATTERNS = {\n 'angular': ['*'],\n 'angular-resource': ['*'],\n 'backbone': ['*'],\n 'backbone-forms': ['*'],\n 'blueimp-file-upload': ['*'],\n 'bootstrap': ['dist/*'],\n 'bootstrap-3-typeahead': ['*'],\n 'clipboard': ['*'],\n 'font-awesome': ['css/*', 'fonts/*'],\n 'jquery': ['*'],\n 'jquery-migrate': ['*'],\n 'jquery-ui-dist': ['jquery-ui.min.js'],\n 'mustache': ['mustache.min.js'],\n 'ng-dialog': ['*'],\n 'sprintf-js': ['dist/*'],\n 'underscore': ['*'],\n 'underscore.string': ['dist/*']\n}\n", "path": "tardis/default_settings/static_files.py"}]}
| 1,412 | 237 |
gh_patches_debug_25561 | rasdani/github-patches | git_diff | pytorch__ignite-1352 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MyPy: improve ignite.utils module
## 🚀 Feature
Currently, mypy ignores all errors for all modules. We have to rework our typing such that mypy checks the code.
In this issue, let's improve https://github.com/pytorch/ignite/blob/master/ignite/utils.py module such that mypy passes on it.
For Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue.
Please, take a look at CONTRIBUTING guide.
</issue>
<code>
[start of ignite/utils.py]
1 import collections.abc as collections
2 import logging
3 import random
4 from typing import Any, Callable, Optional, Tuple, Type, Union
5
6 import torch
7
8 __all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger", "manual_seed"]
9
10
11 def convert_tensor(
12 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
13 device: Optional[Union[str, torch.device]] = None,
14 non_blocking: bool = False,
15 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
16 """Move tensors to relevant device."""
17
18 def _func(tensor: torch.Tensor) -> torch.Tensor:
19 return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor
20
21 return apply_to_tensor(input_, _func)
22
23
24 def apply_to_tensor(
25 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable
26 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
27 """Apply a function on a tensor or mapping, or sequence of tensors.
28 """
29 return apply_to_type(input_, torch.Tensor, func)
30
31
32 def apply_to_type(
33 input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],
34 input_type: Union[Type, Tuple[Type[Any], Any]],
35 func: Callable,
36 ) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:
37 """Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.
38 """
39 if isinstance(input_, input_type):
40 return func(input_)
41 if isinstance(input_, (str, bytes)):
42 return input_
43 if isinstance(input_, collections.Mapping):
44 return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})
45 if isinstance(input_, tuple) and hasattr(input_, "_fields"): # namedtuple
46 return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))
47 if isinstance(input_, collections.Sequence):
48 return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])
49 raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_))))
50
51
52 def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
53 """Convert a tensor of indices of any shape `(N, ...)` to a
54 tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the
55 input's device`.
56 """
57 onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)
58 return onehot.scatter_(1, indices.unsqueeze(1), 1)
59
60
61 def setup_logger(
62 name: Optional[str] = None,
63 level: int = logging.INFO,
64 format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
65 filepath: Optional[str] = None,
66 distributed_rank: Optional[int] = None,
67 ) -> logging.Logger:
68 """Setups logger: name, level, format etc.
69
70 Args:
71 name (str, optional): new name for the logger. If None, the standard logger is used.
72 level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG
73 format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`
74 filepath (str, optional): Optional logging file path. If not None, logs are written to the file.
75 distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.
76 If None, distributed_rank is initialized to the rank of process.
77
78 Returns:
79 logging.Logger
80
81 For example, to improve logs readability when training with a trainer and evaluator:
82
83 .. code-block:: python
84
85 from ignite.utils import setup_logger
86
87 trainer = ...
88 evaluator = ...
89
90 trainer.logger = setup_logger("trainer")
91 evaluator.logger = setup_logger("evaluator")
92
93 trainer.run(data, max_epochs=10)
94
95 # Logs will look like
96 # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
97 # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
98 # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
99 # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
100 # ...
101
102 """
103 logger = logging.getLogger(name)
104
105 # don't propagate to ancestors
106 # the problem here is to attach handlers to loggers
107 # should we provide a default configuration less open ?
108 if name is not None:
109 logger.propagate = False
110
111 # Remove previous handlers
112 if logger.hasHandlers():
113 for h in list(logger.handlers):
114 logger.removeHandler(h)
115
116 formatter = logging.Formatter(format)
117
118 if distributed_rank is None:
119 import ignite.distributed as idist
120
121 distributed_rank = idist.get_rank()
122
123 if distributed_rank > 0:
124 logger.addHandler(logging.NullHandler())
125 else:
126 logger.setLevel(level)
127
128 ch = logging.StreamHandler()
129 ch.setLevel(level)
130 ch.setFormatter(formatter)
131 logger.addHandler(ch)
132
133 if filepath is not None:
134 fh = logging.FileHandler(filepath)
135 fh.setLevel(level)
136 fh.setFormatter(formatter)
137 logger.addHandler(fh)
138
139 return logger
140
141
142 def manual_seed(seed: int) -> None:
143 """Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).
144
145 Args:
146 seed (int): Random state seed
147
148 """
149 random.seed(seed)
150 torch.manual_seed(seed)
151 try:
152 import numpy as np
153
154 np.random.seed(seed)
155 except ImportError:
156 pass
157
[end of ignite/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/utils.py b/ignite/utils.py
--- a/ignite/utils.py
+++ b/ignite/utils.py
@@ -1,7 +1,7 @@
import collections.abc as collections
import logging
import random
-from typing import Any, Callable, Optional, Tuple, Type, Union
+from typing import Any, Callable, Optional, Tuple, Type, Union, cast
import torch
@@ -41,11 +41,13 @@
if isinstance(input_, (str, bytes)):
return input_
if isinstance(input_, collections.Mapping):
- return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})
+ return cast(Callable, type(input_))(
+ {k: apply_to_type(sample, input_type, func) for k, sample in input_.items()}
+ )
if isinstance(input_, tuple) and hasattr(input_, "_fields"): # namedtuple
- return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))
+ return cast(Callable, type(input_))(*(apply_to_type(sample, input_type, func) for sample in input_))
if isinstance(input_, collections.Sequence):
- return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])
+ return cast(Callable, type(input_))([apply_to_type(sample, input_type, func) for sample in input_])
raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_))))
|
{"golden_diff": "diff --git a/ignite/utils.py b/ignite/utils.py\n--- a/ignite/utils.py\n+++ b/ignite/utils.py\n@@ -1,7 +1,7 @@\n import collections.abc as collections\n import logging\n import random\n-from typing import Any, Callable, Optional, Tuple, Type, Union\n+from typing import Any, Callable, Optional, Tuple, Type, Union, cast\n \n import torch\n \n@@ -41,11 +41,13 @@\n if isinstance(input_, (str, bytes)):\n return input_\n if isinstance(input_, collections.Mapping):\n- return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})\n+ return cast(Callable, type(input_))(\n+ {k: apply_to_type(sample, input_type, func) for k, sample in input_.items()}\n+ )\n if isinstance(input_, tuple) and hasattr(input_, \"_fields\"): # namedtuple\n- return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))\n+ return cast(Callable, type(input_))(*(apply_to_type(sample, input_type, func) for sample in input_))\n if isinstance(input_, collections.Sequence):\n- return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])\n+ return cast(Callable, type(input_))([apply_to_type(sample, input_type, func) for sample in input_])\n raise TypeError((\"input must contain {}, dicts or lists; found {}\".format(input_type, type(input_))))\n", "issue": "MyPy: improve ignite.utils module\n## \ud83d\ude80 Feature\r\n\r\nCurrently, mypy ignores all errors for all modules. We have to rework our typing such that mypy checks the code.\r\nIn this issue, let's improve https://github.com/pytorch/ignite/blob/master/ignite/utils.py module such that mypy passes on it.\r\n\r\nFor Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue.\r\nPlease, take a look at CONTRIBUTING guide.\r\n\n", "before_files": [{"content": "import collections.abc as collections\nimport logging\nimport random\nfrom typing import Any, Callable, Optional, Tuple, Type, Union\n\nimport torch\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"manual_seed\"]\n\n\ndef convert_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n\n return apply_to_tensor(input_, _func)\n\n\ndef apply_to_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n \"\"\"\n return apply_to_type(input_, torch.Tensor, func)\n\n\ndef apply_to_type(\n input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.\n \"\"\"\n if isinstance(input_, input_type):\n return func(input_)\n if isinstance(input_, (str, bytes)):\n return input_\n if isinstance(input_, collections.Mapping):\n return type(input_)({k: apply_to_type(sample, 
input_type, func) for k, sample in input_.items()})\n if isinstance(input_, tuple) and hasattr(input_, \"_fields\"): # namedtuple\n return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))\n if isinstance(input_, collections.Sequence):\n return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])\n raise TypeError((\"input must contain {}, dicts or lists; found {}\".format(input_type, type(input_))))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the\n input's device`.\n \"\"\"\n onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: Optional[str] = None,\n level: int = logging.INFO,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: Optional[int] = None,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name (str, optional): new name for the logger. If None, the standard logger is used.\n level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG\n format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`\n filepath (str, optional): Optional logging file path. If not None, logs are written to the file.\n distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.\n If None, distributed_rank is initialized to the rank of process.\n\n Returns:\n logging.Logger\n\n For example, to improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. 
Time taken: 00:01:02\n # ...\n\n \"\"\"\n logger = logging.getLogger(name)\n\n # don't propagate to ancestors\n # the problem here is to attach handlers to loggers\n # should we provide a default configuration less open ?\n if name is not None:\n logger.propagate = False\n\n # Remove previous handlers\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n formatter = logging.Formatter(format)\n\n if distributed_rank is None:\n import ignite.distributed as idist\n\n distributed_rank = idist.get_rank()\n\n if distributed_rank > 0:\n logger.addHandler(logging.NullHandler())\n else:\n logger.setLevel(level)\n\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\ndef manual_seed(seed: int) -> None:\n \"\"\"Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).\n\n Args:\n seed (int): Random state seed\n\n \"\"\"\n random.seed(seed)\n torch.manual_seed(seed)\n try:\n import numpy as np\n\n np.random.seed(seed)\n except ImportError:\n pass\n", "path": "ignite/utils.py"}]}
| 2,389 | 342 |
gh_patches_debug_36787 | rasdani/github-patches | git_diff | ipython__ipython-3822 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nbconvert: Ability to specify name of output file
Brought up in a conversation between @Carreau and I. Maybe add a traitlet to the `FilesWriter` and add a convenience flag to the `nbconvertapp.py`.
</issue>
<code>
[start of IPython/nbconvert/nbconvertapp.py]
1 #!/usr/bin/env python
2 """NBConvert is a utility for conversion of .ipynb files.
3
4 Command-line interface for the NbConvert conversion utility.
5 """
6 #-----------------------------------------------------------------------------
7 #Copyright (c) 2013, the IPython Development Team.
8 #
9 #Distributed under the terms of the Modified BSD License.
10 #
11 #The full license is in the file COPYING.txt, distributed with this software.
12 #-----------------------------------------------------------------------------
13
14 #-----------------------------------------------------------------------------
15 #Imports
16 #-----------------------------------------------------------------------------
17
18 # Stdlib imports
19 from __future__ import print_function
20 import sys
21 import os
22 import glob
23
24 # From IPython
25 from IPython.core.application import BaseIPythonApplication, base_aliases, base_flags
26 from IPython.config import catch_config_error, Configurable
27 from IPython.utils.traitlets import (
28 Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,
29 )
30 from IPython.utils.importstring import import_item
31
32 from .exporters.export import export_by_name, get_export_names, ExporterNameError
33 from IPython.nbconvert import exporters, transformers, writers, post_processors
34 from .utils.base import NbConvertBase
35 from .utils.exceptions import ConversionException
36
37 #-----------------------------------------------------------------------------
38 #Classes and functions
39 #-----------------------------------------------------------------------------
40
41 class DottedOrNone(DottedObjectName):
42 """
43 A string holding a valid dotted object name in Python, such as A.b3._c
44 Also allows for None type."""
45
46 default_value = u''
47
48 def validate(self, obj, value):
49 if value is not None and len(value) > 0:
50 return super(DottedOrNone, self).validate(obj, value)
51 else:
52 return value
53
54 nbconvert_aliases = {}
55 nbconvert_aliases.update(base_aliases)
56 nbconvert_aliases.update({
57 'to' : 'NbConvertApp.export_format',
58 'template' : 'Exporter.template_file',
59 'notebooks' : 'NbConvertApp.notebooks',
60 'writer' : 'NbConvertApp.writer_class',
61 'post': 'NbConvertApp.post_processor_class'
62 })
63
64 nbconvert_flags = {}
65 nbconvert_flags.update(base_flags)
66 nbconvert_flags.update({
67 'stdout' : (
68 {'NbConvertApp' : {'writer_class' : "StdoutWriter"}},
69 "Write notebook output to stdout instead of files."
70 )
71 })
72
73
74 class NbConvertApp(BaseIPythonApplication):
75 """Application used to convert to and from notebook file type (*.ipynb)"""
76
77 name = 'ipython-nbconvert'
78 aliases = nbconvert_aliases
79 flags = nbconvert_flags
80
81 def _classes_default(self):
82 classes = [NbConvertBase]
83 for pkg in (exporters, transformers, writers):
84 for name in dir(pkg):
85 cls = getattr(pkg, name)
86 if isinstance(cls, type) and issubclass(cls, Configurable):
87 classes.append(cls)
88 return classes
89
90 description = Unicode(
91 u"""This application is used to convert notebook files (*.ipynb)
92 to various other formats.
93
94 WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.""")
95
96 examples = Unicode(u"""
97 The simplest way to use nbconvert is
98
99 > ipython nbconvert mynotebook.ipynb
100
101 which will convert mynotebook.ipynb to the default format (probably HTML).
102
103 You can specify the export format with `--to`.
104 Options include {0}
105
106 > ipython nbconvert --to latex mynotebook.ipnynb
107
108 Both HTML and LaTeX support multiple output templates. LaTeX includes
109 'basic', 'book', and 'article'. HTML includes 'basic' and 'full'. You
110 can specify the flavor of the format used.
111
112 > ipython nbconvert --to html --template basic mynotebook.ipynb
113
114 You can also pipe the output to stdout, rather than a file
115
116 > ipython nbconvert mynotebook.ipynb --stdout
117
118 A post-processor can be used to compile a PDF
119
120 > ipython nbconvert mynotebook.ipynb --to latex --post PDF
121
122 Multiple notebooks can be given at the command line in a couple of
123 different ways:
124
125 > ipython nbconvert notebook*.ipynb
126 > ipython nbconvert notebook1.ipynb notebook2.ipynb
127
128 or you can specify the notebooks list in a config file, containing::
129
130 c.NbConvertApp.notebooks = ["my_notebook.ipynb"]
131
132 > ipython nbconvert --config mycfg.py
133 """.format(get_export_names()))
134
135 # Writer specific variables
136 writer = Instance('IPython.nbconvert.writers.base.WriterBase',
137 help="""Instance of the writer class used to write the
138 results of the conversion.""")
139 writer_class = DottedObjectName('FilesWriter', config=True,
140 help="""Writer class used to write the
141 results of the conversion""")
142 writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',
143 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',
144 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}
145 writer_factory = Type()
146
147 def _writer_class_changed(self, name, old, new):
148 if new in self.writer_aliases:
149 new = self.writer_aliases[new]
150 self.writer_factory = import_item(new)
151
152 # Post-processor specific variables
153 post_processor = Instance('IPython.nbconvert.post_processors.base.PostProcessorBase',
154 help="""Instance of the PostProcessor class used to write the
155 results of the conversion.""")
156
157 post_processor_class = DottedOrNone(config=True,
158 help="""PostProcessor class used to write the
159 results of the conversion""")
160 post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor'}
161 post_processor_factory = Type()
162
163 def _post_processor_class_changed(self, name, old, new):
164 if new in self.post_processor_aliases:
165 new = self.post_processor_aliases[new]
166 if new:
167 self.post_processor_factory = import_item(new)
168
169
170 # Other configurable variables
171 export_format = CaselessStrEnum(get_export_names(),
172 default_value="html",
173 config=True,
174 help="""The export format to be used."""
175 )
176
177 notebooks = List([], config=True, help="""List of notebooks to convert.
178 Wildcards are supported.
179 Filenames passed positionally will be added to the list.
180 """)
181
182 @catch_config_error
183 def initialize(self, argv=None):
184 super(NbConvertApp, self).initialize(argv)
185 self.init_syspath()
186 self.init_notebooks()
187 self.init_writer()
188 self.init_post_processor()
189
190
191
192 def init_syspath(self):
193 """
194 Add the cwd to the sys.path ($PYTHONPATH)
195 """
196 sys.path.insert(0, os.getcwd())
197
198
199 def init_notebooks(self):
200 """Construct the list of notebooks.
201 If notebooks are passed on the command-line,
202 they override notebooks specified in config files.
203 Glob each notebook to replace notebook patterns with filenames.
204 """
205
206 # Specifying notebooks on the command-line overrides (rather than adds)
207 # the notebook list
208 if self.extra_args:
209 patterns = self.extra_args
210 else:
211 patterns = self.notebooks
212
213 # Use glob to replace all the notebook patterns with filenames.
214 filenames = []
215 for pattern in patterns:
216
217 # Use glob to find matching filenames. Allow the user to convert
218 # notebooks without having to type the extension.
219 globbed_files = glob.glob(pattern)
220 globbed_files.extend(glob.glob(pattern + '.ipynb'))
221
222 for filename in globbed_files:
223 if not filename in filenames:
224 filenames.append(filename)
225 self.notebooks = filenames
226
227 def init_writer(self):
228 """
229 Initialize the writer (which is stateless)
230 """
231 self._writer_class_changed(None, self.writer_class, self.writer_class)
232 self.writer = self.writer_factory(parent=self)
233
234 def init_post_processor(self):
235 """
236 Initialize the post_processor (which is stateless)
237 """
238 self._post_processor_class_changed(None, self.post_processor_class,
239 self.post_processor_class)
240 if self.post_processor_factory:
241 self.post_processor = self.post_processor_factory(parent=self)
242
243 def start(self):
244 """
245 Ran after initialization completed
246 """
247 super(NbConvertApp, self).start()
248 self.convert_notebooks()
249
250 def convert_notebooks(self):
251 """
252 Convert the notebooks in the self.notebook traitlet
253 """
254 # Export each notebook
255 conversion_success = 0
256 for notebook_filename in self.notebooks:
257
258 # Get a unique key for the notebook and set it in the resources object.
259 basename = os.path.basename(notebook_filename)
260 notebook_name = basename[:basename.rfind('.')]
261 resources = {}
262 resources['unique_key'] = notebook_name
263 resources['output_files_dir'] = '%s_files' % notebook_name
264
265 # Try to export
266 try:
267 output, resources = export_by_name(self.export_format,
268 notebook_filename,
269 resources=resources,
270 config=self.config)
271 except ExporterNameError as e:
272 print("Error while converting '%s': '%s' exporter not found."
273 %(notebook_filename, self.export_format),
274 file=sys.stderr)
275 print("Known exporters are:",
276 "\n\t" + "\n\t".join(get_export_names()),
277 file=sys.stderr)
278 self.exit(1)
279 except ConversionException as e:
280 print("Error while converting '%s': %s" %(notebook_filename, e),
281 file=sys.stderr)
282 self.exit(1)
283 else:
284 write_resultes = self.writer.write(output, resources, notebook_name=notebook_name)
285
286 #Post-process if post processor has been defined.
287 if hasattr(self, 'post_processor') and self.post_processor:
288 self.post_processor(write_resultes)
289 conversion_success += 1
290
291 # If nothing was converted successfully, help the user.
292 if conversion_success == 0:
293 self.print_help()
294 sys.exit(-1)
295
296
297 #-----------------------------------------------------------------------------
298 # Main entry point
299 #-----------------------------------------------------------------------------
300
301 launch_new_instance = NbConvertApp.launch_instance
302
[end of IPython/nbconvert/nbconvertapp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/nbconvert/nbconvertapp.py b/IPython/nbconvert/nbconvertapp.py
--- a/IPython/nbconvert/nbconvertapp.py
+++ b/IPython/nbconvert/nbconvertapp.py
@@ -28,6 +28,7 @@
Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,
)
from IPython.utils.importstring import import_item
+from IPython.utils.text import dedent
from .exporters.export import export_by_name, get_export_names, ExporterNameError
from IPython.nbconvert import exporters, transformers, writers, post_processors
@@ -58,7 +59,8 @@
'template' : 'Exporter.template_file',
'notebooks' : 'NbConvertApp.notebooks',
'writer' : 'NbConvertApp.writer_class',
- 'post': 'NbConvertApp.post_processor_class'
+ 'post': 'NbConvertApp.post_processor_class',
+ 'output': 'NbConvertApp.output_base'
})
nbconvert_flags = {}
@@ -93,6 +95,10 @@
WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.""")
+ output_base = Unicode('', config=True, help='''overwrite base name use for output files.
+ can only be use when converting one notebook at a time.
+ ''')
+
examples = Unicode(u"""
The simplest way to use nbconvert is
@@ -253,11 +259,21 @@
"""
# Export each notebook
conversion_success = 0
+
+ if self.output_base != '' and len(self.notebooks) > 1:
+ print(dedent(
+ """UsageError: --output flag or `NbConvertApp.output_base` config option
+ cannot be used when converting multiple notebooks.
+ """))
+ self.exit(1)
+
for notebook_filename in self.notebooks:
# Get a unique key for the notebook and set it in the resources object.
basename = os.path.basename(notebook_filename)
notebook_name = basename[:basename.rfind('.')]
+ if self.output_base:
+ notebook_name = self.output_base
resources = {}
resources['unique_key'] = notebook_name
resources['output_files_dir'] = '%s_files' % notebook_name
|
{"golden_diff": "diff --git a/IPython/nbconvert/nbconvertapp.py b/IPython/nbconvert/nbconvertapp.py\n--- a/IPython/nbconvert/nbconvertapp.py\n+++ b/IPython/nbconvert/nbconvertapp.py\n@@ -28,6 +28,7 @@\n Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,\n )\n from IPython.utils.importstring import import_item\n+from IPython.utils.text import dedent\n \n from .exporters.export import export_by_name, get_export_names, ExporterNameError\n from IPython.nbconvert import exporters, transformers, writers, post_processors\n@@ -58,7 +59,8 @@\n 'template' : 'Exporter.template_file',\n 'notebooks' : 'NbConvertApp.notebooks',\n 'writer' : 'NbConvertApp.writer_class',\n- 'post': 'NbConvertApp.post_processor_class'\n+ 'post': 'NbConvertApp.post_processor_class',\n+ 'output': 'NbConvertApp.output_base'\n })\n \n nbconvert_flags = {}\n@@ -93,6 +95,10 @@\n \n WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.\"\"\")\n \n+ output_base = Unicode('', config=True, help='''overwrite base name use for output files.\n+ can only be use when converting one notebook at a time.\n+ ''')\n+\n examples = Unicode(u\"\"\"\n The simplest way to use nbconvert is\n \n@@ -253,11 +259,21 @@\n \"\"\"\n # Export each notebook\n conversion_success = 0\n+\n+ if self.output_base != '' and len(self.notebooks) > 1:\n+ print(dedent(\n+ \"\"\"UsageError: --output flag or `NbConvertApp.output_base` config option\n+ cannot be used when converting multiple notebooks.\n+ \"\"\"))\n+ self.exit(1)\n+\n for notebook_filename in self.notebooks:\n \n # Get a unique key for the notebook and set it in the resources object.\n basename = os.path.basename(notebook_filename)\n notebook_name = basename[:basename.rfind('.')]\n+ if self.output_base:\n+ notebook_name = self.output_base\n resources = {}\n resources['unique_key'] = notebook_name\n resources['output_files_dir'] = '%s_files' % notebook_name\n", "issue": "nbconvert: Ability to specify name of output file\nBrought up in a conversation between @Carreau and I. 
Maybe add a traitlet to the `FilesWriter` and add a convenience flag to the `nbconvertapp.py`.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"NBConvert is a utility for conversion of .ipynb files.\n\nCommand-line interface for the NbConvert conversion utility.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n#Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib imports\nfrom __future__ import print_function\nimport sys\nimport os\nimport glob\n\n# From IPython\nfrom IPython.core.application import BaseIPythonApplication, base_aliases, base_flags\nfrom IPython.config import catch_config_error, Configurable\nfrom IPython.utils.traitlets import (\n Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,\n)\nfrom IPython.utils.importstring import import_item\n\nfrom .exporters.export import export_by_name, get_export_names, ExporterNameError\nfrom IPython.nbconvert import exporters, transformers, writers, post_processors\nfrom .utils.base import NbConvertBase\nfrom .utils.exceptions import ConversionException\n\n#-----------------------------------------------------------------------------\n#Classes and functions\n#-----------------------------------------------------------------------------\n\nclass DottedOrNone(DottedObjectName):\n \"\"\"\n A string holding a valid dotted object name in Python, such as A.b3._c\n Also allows for None type.\"\"\"\n \n default_value = u''\n\n def validate(self, obj, value):\n if value is not None and len(value) > 0:\n return super(DottedOrNone, self).validate(obj, value)\n else:\n return value\n \nnbconvert_aliases = {}\nnbconvert_aliases.update(base_aliases)\nnbconvert_aliases.update({\n 'to' : 'NbConvertApp.export_format',\n 'template' : 'Exporter.template_file',\n 'notebooks' : 'NbConvertApp.notebooks',\n 'writer' : 'NbConvertApp.writer_class',\n 'post': 'NbConvertApp.post_processor_class'\n})\n\nnbconvert_flags = {}\nnbconvert_flags.update(base_flags)\nnbconvert_flags.update({\n 'stdout' : (\n {'NbConvertApp' : {'writer_class' : \"StdoutWriter\"}},\n \"Write notebook output to stdout instead of files.\"\n )\n})\n\n\nclass NbConvertApp(BaseIPythonApplication):\n \"\"\"Application used to convert to and from notebook file type (*.ipynb)\"\"\"\n\n name = 'ipython-nbconvert'\n aliases = nbconvert_aliases\n flags = nbconvert_flags\n \n def _classes_default(self):\n classes = [NbConvertBase]\n for pkg in (exporters, transformers, writers):\n for name in dir(pkg):\n cls = getattr(pkg, name)\n if isinstance(cls, type) and issubclass(cls, Configurable):\n classes.append(cls)\n return classes\n\n description = Unicode(\n u\"\"\"This application is used to convert notebook files (*.ipynb)\n to various other formats.\n\n WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.\"\"\")\n\n examples = Unicode(u\"\"\"\n The simplest way to use nbconvert is\n \n > ipython nbconvert mynotebook.ipynb\n \n which will convert mynotebook.ipynb to the default format (probably HTML).\n \n You can specify the export format with `--to`.\n Options include {0}\n \n > ipython nbconvert --to latex 
mynotebook.ipnynb\n\n Both HTML and LaTeX support multiple output templates. LaTeX includes\n 'basic', 'book', and 'article'. HTML includes 'basic' and 'full'. You \n can specify the flavor of the format used.\n\n > ipython nbconvert --to html --template basic mynotebook.ipynb\n \n You can also pipe the output to stdout, rather than a file\n \n > ipython nbconvert mynotebook.ipynb --stdout\n\n A post-processor can be used to compile a PDF\n\n > ipython nbconvert mynotebook.ipynb --to latex --post PDF\n \n Multiple notebooks can be given at the command line in a couple of \n different ways:\n \n > ipython nbconvert notebook*.ipynb\n > ipython nbconvert notebook1.ipynb notebook2.ipynb\n \n or you can specify the notebooks list in a config file, containing::\n \n c.NbConvertApp.notebooks = [\"my_notebook.ipynb\"]\n \n > ipython nbconvert --config mycfg.py\n \"\"\".format(get_export_names()))\n\n # Writer specific variables\n writer = Instance('IPython.nbconvert.writers.base.WriterBase', \n help=\"\"\"Instance of the writer class used to write the \n results of the conversion.\"\"\")\n writer_class = DottedObjectName('FilesWriter', config=True, \n help=\"\"\"Writer class used to write the \n results of the conversion\"\"\")\n writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',\n 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',\n 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}\n writer_factory = Type()\n\n def _writer_class_changed(self, name, old, new):\n if new in self.writer_aliases:\n new = self.writer_aliases[new]\n self.writer_factory = import_item(new)\n\n # Post-processor specific variables\n post_processor = Instance('IPython.nbconvert.post_processors.base.PostProcessorBase', \n help=\"\"\"Instance of the PostProcessor class used to write the \n results of the conversion.\"\"\")\n\n post_processor_class = DottedOrNone(config=True, \n help=\"\"\"PostProcessor class used to write the \n results of the conversion\"\"\")\n post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor'}\n post_processor_factory = Type()\n\n def _post_processor_class_changed(self, name, old, new):\n if new in self.post_processor_aliases:\n new = self.post_processor_aliases[new]\n if new:\n self.post_processor_factory = import_item(new)\n\n\n # Other configurable variables\n export_format = CaselessStrEnum(get_export_names(),\n default_value=\"html\",\n config=True,\n help=\"\"\"The export format to be used.\"\"\"\n )\n\n notebooks = List([], config=True, help=\"\"\"List of notebooks to convert.\n Wildcards are supported.\n Filenames passed positionally will be added to the list.\n \"\"\")\n\n @catch_config_error\n def initialize(self, argv=None):\n super(NbConvertApp, self).initialize(argv)\n self.init_syspath()\n self.init_notebooks()\n self.init_writer()\n self.init_post_processor()\n\n\n\n def init_syspath(self):\n \"\"\"\n Add the cwd to the sys.path ($PYTHONPATH)\n \"\"\"\n sys.path.insert(0, os.getcwd())\n \n\n def init_notebooks(self):\n \"\"\"Construct the list of notebooks.\n If notebooks are passed on the command-line,\n they override notebooks specified in config files.\n Glob each notebook to replace notebook patterns with filenames.\n \"\"\"\n\n # Specifying notebooks on the command-line overrides (rather than adds)\n # the notebook list\n if self.extra_args:\n patterns = self.extra_args\n else:\n patterns = self.notebooks\n\n # Use glob to replace all the notebook patterns with filenames.\n filenames = []\n for 
pattern in patterns:\n \n # Use glob to find matching filenames. Allow the user to convert \n # notebooks without having to type the extension.\n globbed_files = glob.glob(pattern)\n globbed_files.extend(glob.glob(pattern + '.ipynb'))\n\n for filename in globbed_files:\n if not filename in filenames:\n filenames.append(filename)\n self.notebooks = filenames\n\n def init_writer(self):\n \"\"\"\n Initialize the writer (which is stateless)\n \"\"\"\n self._writer_class_changed(None, self.writer_class, self.writer_class)\n self.writer = self.writer_factory(parent=self)\n\n def init_post_processor(self):\n \"\"\"\n Initialize the post_processor (which is stateless)\n \"\"\"\n self._post_processor_class_changed(None, self.post_processor_class, \n self.post_processor_class)\n if self.post_processor_factory:\n self.post_processor = self.post_processor_factory(parent=self)\n\n def start(self):\n \"\"\"\n Ran after initialization completed\n \"\"\"\n super(NbConvertApp, self).start()\n self.convert_notebooks()\n\n def convert_notebooks(self):\n \"\"\"\n Convert the notebooks in the self.notebook traitlet\n \"\"\"\n # Export each notebook\n conversion_success = 0\n for notebook_filename in self.notebooks:\n\n # Get a unique key for the notebook and set it in the resources object.\n basename = os.path.basename(notebook_filename)\n notebook_name = basename[:basename.rfind('.')]\n resources = {}\n resources['unique_key'] = notebook_name\n resources['output_files_dir'] = '%s_files' % notebook_name\n\n # Try to export\n try:\n output, resources = export_by_name(self.export_format,\n notebook_filename, \n resources=resources,\n config=self.config)\n except ExporterNameError as e:\n print(\"Error while converting '%s': '%s' exporter not found.\"\n %(notebook_filename, self.export_format),\n file=sys.stderr)\n print(\"Known exporters are:\",\n \"\\n\\t\" + \"\\n\\t\".join(get_export_names()),\n file=sys.stderr)\n self.exit(1)\n except ConversionException as e:\n print(\"Error while converting '%s': %s\" %(notebook_filename, e),\n file=sys.stderr)\n self.exit(1)\n else:\n write_resultes = self.writer.write(output, resources, notebook_name=notebook_name)\n\n #Post-process if post processor has been defined.\n if hasattr(self, 'post_processor') and self.post_processor:\n self.post_processor(write_resultes)\n conversion_success += 1\n\n # If nothing was converted successfully, help the user.\n if conversion_success == 0:\n self.print_help()\n sys.exit(-1)\n\n\n#-----------------------------------------------------------------------------\n# Main entry point\n#-----------------------------------------------------------------------------\n\nlaunch_new_instance = NbConvertApp.launch_instance\n", "path": "IPython/nbconvert/nbconvertapp.py"}]}
| 3,584 | 511 |
gh_patches_debug_13235 | rasdani/github-patches | git_diff | kivy__python-for-android-618 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jpeg recipe is broken
It is missing /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk
Perpaps just the path is incorrect?
```
[INFO]: Prebuilding jpeg for armeabi
[INFO]: jpeg has no prebuild_armeabi, skipping
[DEBUG]: -> running cp /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk /home/brussee/.local/share/python-for-android/build/other_builds/jpeg/armeabi/jpeg/Application.mk
[DEBUG]: /bin/cp: cannot stat ‘/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk’: No such file or directory
Traceback (most recent call last):
File "/home/brussee/.local/bin/p4a", line 9, in <module>
load_entry_point('python-for-android==0.3', 'console_scripts', 'p4a')()
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 708, in main
ToolchainCL()
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 323, in __init__
getattr(self, args.command)(unknown)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 105, in wrapper_func
build_dist_from_args(ctx, dist, dist_args)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py", line 142, in build_dist_from_args
build_recipes(build_order, python_modules, ctx)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/build.py", line 543, in build_recipes
recipe.prebuild_arch(arch)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/__init__.py", line 22, in prebuild_arch
shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)
File "/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/logger.py", line 160, in shprint
for line in output:
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 565, in next
self.wait()
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 500, in wait
self.handle_command_exit_code(exit_code)
File "/home/brussee/.local/lib/python2.7/site-packages/sh.py", line 516, in handle_command_exit_code
raise exc(self.ran, self.process.stdout, self.process.stderr)
sh.ErrorReturnCode_1
```
</issue>
<code>
[start of setup.py]
1
2 from setuptools import setup, find_packages
3 from os import walk
4 from os.path import join, dirname, sep
5 import os
6 import glob
7
8 # NOTE: All package data should also be set in MANIFEST.in
9
10 packages = find_packages()
11
12 package_data = {'': ['*.tmpl',
13 '*.patch', ], }
14
15 data_files = []
16
17 # By specifying every file manually, package_data will be able to
18 # include them in binary distributions. Note that we have to add
19 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
20 # work.
21 def recursively_include(results, directory, patterns):
22 for root, subfolders, files in walk(directory):
23 for fn in files:
24 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):
25 continue
26 filename = join(root, fn)
27 directory = 'pythonforandroid'
28 if directory not in results:
29 results[directory] = []
30 results[directory].append(join(*filename.split(sep)[1:]))
31
32 recursively_include(package_data, 'pythonforandroid/recipes',
33 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])
34 recursively_include(package_data, 'pythonforandroid/bootstraps',
35 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
36 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
37 recursively_include(package_data, 'pythonforandroid/bootstraps',
38 ['sdl-config', ])
39 recursively_include(package_data, 'pythonforandroid',
40 ['liblink', 'biglink', 'liblink.sh'])
41
42 setup(name='python-for-android',
43 version='0.3',
44 description='Android APK packager for Python scripts and apps',
45 author='The Kivy team',
46 author_email='[email protected]',
47 url='https://github.com/kivy/python-for-android',
48 license='MIT',
49 install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',
50 'six'],
51 entry_points={
52 'console_scripts': [
53 'python-for-android = pythonforandroid.toolchain:main',
54 'p4a = pythonforandroid.toolchain:main',
55 ],
56 'distutils.commands': [
57 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',
58 ],
59 },
60 classifiers = [
61 'Development Status :: 3 - Alpha',
62 'Intended Audience :: Developers',
63 'License :: OSI Approved :: MIT License',
64 'Operating System :: Microsoft :: Windows',
65 'Operating System :: OS Independent',
66 'Operating System :: POSIX :: Linux',
67 'Operating System :: MacOS :: MacOS X',
68 'Programming Language :: C',
69 'Programming Language :: Python :: 2',
70 'Programming Language :: Python :: 3',
71 'Topic :: Software Development',
72 'Topic :: Utilities',
73 ],
74 packages=packages,
75 package_data=package_data,
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,8 @@
results[directory].append(join(*filename.split(sep)[1:]))
recursively_include(package_data, 'pythonforandroid/recipes',
- ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])
+ ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
+ '*.mk', ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
'*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,8 @@\n results[directory].append(join(*filename.split(sep)[1:]))\n \n recursively_include(package_data, 'pythonforandroid/recipes',\n- ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])\n+ ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n+ '*.mk', ])\n recursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\n", "issue": "Jpeg recipe is broken\nIt is missing /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk\nPerpaps just the path is incorrect?\n\n```\n[INFO]: Prebuilding jpeg for armeabi\n[INFO]: jpeg has no prebuild_armeabi, skipping\n[DEBUG]: -> running cp /home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk /home/brussee/.local/share/python-for-android/build/other_builds/jpeg/armeabi/jpeg/Application.mk\n[DEBUG]: /bin/cp: cannot stat \u2018/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/Application.mk\u2019: No such file or directory\nTraceback (most recent call last):\n File \"/home/brussee/.local/bin/p4a\", line 9, in <module>\n load_entry_point('python-for-android==0.3', 'console_scripts', 'p4a')()\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 708, in main\n ToolchainCL()\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 323, in __init__\n getattr(self, args.command)(unknown)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 105, in wrapper_func\n build_dist_from_args(ctx, dist, dist_args)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/toolchain.py\", line 142, in build_dist_from_args\n build_recipes(build_order, python_modules, ctx)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/build.py\", line 543, in build_recipes\n recipe.prebuild_arch(arch)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/recipes/jpeg/__init__.py\", line 22, in prebuild_arch\n shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)\n File \"/home/brussee/.local/lib/python2.7/site-packages/pythonforandroid/logger.py\", line 160, in shprint\n for line in output:\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 565, in next\n self.wait()\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 500, in wait\n self.handle_command_exit_code(exit_code)\n File \"/home/brussee/.local/lib/python2.7/site-packages/sh.py\", line 516, in handle_command_exit_code\n raise exc(self.ran, self.process.stdout, self.process.stderr)\nsh.ErrorReturnCode_1\n```\n\n", "before_files": [{"content": "\nfrom setuptools import setup, find_packages\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport glob\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. 
Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nsetup(name='python-for-android',\n version='0.3',\n description='Android APK packager for Python scripts and apps',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android', \n license='MIT', \n install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',\n 'six'],\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.toolchain:main',\n 'p4a = pythonforandroid.toolchain:main',\n ],\n 'distutils.commands': [\n 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
| 1,982 | 167 |
gh_patches_debug_36243 | rasdani/github-patches | git_diff | certbot__certbot-9619 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--dns-route53-propagation-seconds not respected?
We got a report that `--dns-route53-propagation-seconds` is not respected. Quickly looking at the code, this seems like a real problem because that flag is added by the common DNS authenticator class in Certbot and used in the `perform` method which the route53 plugin overrides without calling the parent class.
It looks like the route53 plugin has some additional logic to wait for DNS changes to propagate, but I think we should either respect that flag or deprecate it and remove it from the plugin in the future.
</issue>
<code>
[start of certbot-dns-route53/certbot_dns_route53/__init__.py]
1 """
2 The `~certbot_dns_route53.dns_route53` plugin automates the process of
3 completing a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and
4 subsequently removing, TXT records using the Amazon Web Services Route 53 API.
5
6 .. note::
7 The plugin is not installed by default. It can be installed by heading to
8 `certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and
9 selecting the Wildcard tab.
10
11 Named Arguments
12 ---------------
13
14 ======================================== =====================================
15 ``--dns-route53-propagation-seconds`` The number of seconds to wait for DNS
16 to propagate before asking the ACME
17 server to verify the DNS record.
18 (Default: 10)
19 ======================================== =====================================
20
21
22 Credentials
23 -----------
24 Use of this plugin requires a configuration file containing Amazon Web Sevices
25 API credentials for an account with the following permissions:
26
27 * ``route53:ListHostedZones``
28 * ``route53:GetChange``
29 * ``route53:ChangeResourceRecordSets``
30
31 These permissions can be captured in an AWS policy like the one below. Amazon
32 provides `information about managing access <https://docs.aws.amazon.com/Route53
33 /latest/DeveloperGuide/access-control-overview.html>`_ and `information about
34 the required permissions <https://docs.aws.amazon.com/Route53/latest
35 /DeveloperGuide/r53-api-permissions-ref.html>`_
36
37 .. code-block:: json
38 :name: sample-aws-policy.json
39 :caption: Example AWS policy file:
40
41 {
42 "Version": "2012-10-17",
43 "Id": "certbot-dns-route53 sample policy",
44 "Statement": [
45 {
46 "Effect": "Allow",
47 "Action": [
48 "route53:ListHostedZones",
49 "route53:GetChange"
50 ],
51 "Resource": [
52 "*"
53 ]
54 },
55 {
56 "Effect" : "Allow",
57 "Action" : [
58 "route53:ChangeResourceRecordSets"
59 ],
60 "Resource" : [
61 "arn:aws:route53:::hostedzone/YOURHOSTEDZONEID"
62 ]
63 }
64 ]
65 }
66
67 The `access keys <https://docs.aws.amazon.com/general/latest/gr
68 /aws-sec-cred-types.html#access-keys-and-secret-access-keys>`_ for an account
69 with these permissions must be supplied in one of the following ways, which are
70 discussed in more detail in the Boto3 library's documentation about `configuring
71 credentials <https://boto3.readthedocs.io/en/latest/guide/configuration.html
72 #best-practices-for-configuring-credentials>`_.
73
74 * Using the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment
75 variables.
76 * Using a credentials configuration file at the default location,
77 ``~/.aws/config``.
78 * Using a credentials configuration file at a path supplied using the
79 ``AWS_CONFIG_FILE`` environment variable.
80
81 .. code-block:: ini
82 :name: config.ini
83 :caption: Example credentials config file:
84
85 [default]
86 aws_access_key_id=AKIAIOSFODNN7EXAMPLE
87 aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
88
89 .. caution::
90 You should protect these API credentials as you would a password. Users who
91 can read this file can use these credentials to issue some types of API calls
92 on your behalf, limited by the permissions assigned to the account. Users who
93 can cause Certbot to run using these credentials can complete a ``dns-01``
94 challenge to acquire new certificates or revoke existing certificates for
95 domains these credentials are authorized to manage.
96
97
98 Examples
99 --------
100 .. code-block:: bash
101 :caption: To acquire a certificate for ``example.com``
102
103 certbot certonly \\
104 --dns-route53 \\
105 -d example.com
106
107 .. code-block:: bash
108 :caption: To acquire a single certificate for both ``example.com`` and
109 ``www.example.com``
110
111 certbot certonly \\
112 --dns-route53 \\
113 -d example.com \\
114 -d www.example.com
115
116 .. code-block:: bash
117 :caption: To acquire a certificate for ``example.com``, waiting 30 seconds
118 for DNS propagation
119
120 certbot certonly \\
121 --dns-route53 \\
122 --dns-route53-propagation-seconds 30 \\
123 -d example.com
124 """
125
[end of certbot-dns-route53/certbot_dns_route53/__init__.py]
[start of certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py]
1 """Certbot Route53 authenticator plugin."""
2 import collections
3 import logging
4 import time
5 from typing import Any
6 from typing import DefaultDict
7 from typing import Dict
8 from typing import List
9
10 import boto3
11 from botocore.exceptions import ClientError
12 from botocore.exceptions import NoCredentialsError
13
14 from acme.challenges import ChallengeResponse
15 from certbot import errors
16 from certbot.achallenges import AnnotatedChallenge
17 from certbot.plugins import dns_common
18
19 logger = logging.getLogger(__name__)
20
21 INSTRUCTIONS = (
22 "To use certbot-dns-route53, configure credentials as described at "
23 "https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials " # pylint: disable=line-too-long
24 "and add the necessary permissions for Route53 access.")
25
26
27 class Authenticator(dns_common.DNSAuthenticator):
28 """Route53 Authenticator
29
30 This authenticator solves a DNS01 challenge by uploading the answer to AWS
31 Route53.
32 """
33
34 description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
35 "DNS).")
36 ttl = 10
37
38 def __init__(self, *args: Any, **kwargs: Any) -> None:
39 super().__init__(*args, **kwargs)
40 self.r53 = boto3.client("route53")
41 self._resource_records: DefaultDict[str, List[Dict[str, str]]] = \
42 collections.defaultdict(list)
43
44 def more_info(self) -> str:
45 return "Solve a DNS01 challenge using AWS Route53"
46
47 def _setup_credentials(self) -> None:
48 pass
49
50 def _perform(self, domain: str, validation_name: str, validation: str) -> None:
51 pass
52
53 def perform(self, achalls: List[AnnotatedChallenge]) -> List[ChallengeResponse]:
54 self._attempt_cleanup = True
55
56 try:
57 change_ids = [
58 self._change_txt_record("UPSERT",
59 achall.validation_domain_name(achall.domain),
60 achall.validation(achall.account_key))
61 for achall in achalls
62 ]
63
64 for change_id in change_ids:
65 self._wait_for_change(change_id)
66 except (NoCredentialsError, ClientError) as e:
67 logger.debug('Encountered error during perform: %s', e, exc_info=True)
68 raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
69 return [achall.response(achall.account_key) for achall in achalls]
70
71 def _cleanup(self, domain: str, validation_name: str, validation: str) -> None:
72 try:
73 self._change_txt_record("DELETE", validation_name, validation)
74 except (NoCredentialsError, ClientError) as e:
75 logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
76
77 def _find_zone_id_for_domain(self, domain: str) -> str:
78 """Find the zone id responsible a given FQDN.
79
80 That is, the id for the zone whose name is the longest parent of the
81 domain.
82 """
83 paginator = self.r53.get_paginator("list_hosted_zones")
84 zones = []
85 target_labels = domain.rstrip(".").split(".")
86 for page in paginator.paginate():
87 for zone in page["HostedZones"]:
88 if zone["Config"]["PrivateZone"]:
89 continue
90
91 candidate_labels = zone["Name"].rstrip(".").split(".")
92 if candidate_labels == target_labels[-len(candidate_labels):]:
93 zones.append((zone["Name"], zone["Id"]))
94
95 if not zones:
96 raise errors.PluginError(
97 "Unable to find a Route53 hosted zone for {0}".format(domain)
98 )
99
100 # Order the zones that are suffixes for our desired to domain by
101 # length, this puts them in an order like:
102 # ["foo.bar.baz.com", "bar.baz.com", "baz.com", "com"]
103 # And then we choose the first one, which will be the most specific.
104 zones.sort(key=lambda z: len(z[0]), reverse=True)
105 return zones[0][1]
106
107 def _change_txt_record(self, action: str, validation_domain_name: str, validation: str) -> str:
108 zone_id = self._find_zone_id_for_domain(validation_domain_name)
109
110 rrecords = self._resource_records[validation_domain_name]
111 challenge = {"Value": '"{0}"'.format(validation)}
112 if action == "DELETE":
113 # Remove the record being deleted from the list of tracked records
114 rrecords.remove(challenge)
115 if rrecords:
116 # Need to update instead, as we're not deleting the rrset
117 action = "UPSERT"
118 else:
119 # Create a new list containing the record to use with DELETE
120 rrecords = [challenge]
121 else:
122 rrecords.append(challenge)
123
124 response = self.r53.change_resource_record_sets(
125 HostedZoneId=zone_id,
126 ChangeBatch={
127 "Comment": "certbot-dns-route53 certificate validation " + action,
128 "Changes": [
129 {
130 "Action": action,
131 "ResourceRecordSet": {
132 "Name": validation_domain_name,
133 "Type": "TXT",
134 "TTL": self.ttl,
135 "ResourceRecords": rrecords,
136 }
137 }
138 ]
139 }
140 )
141 return response["ChangeInfo"]["Id"]
142
143 def _wait_for_change(self, change_id: str) -> None:
144 """Wait for a change to be propagated to all Route53 DNS servers.
145 https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html
146 """
147 for unused_n in range(0, 120):
148 response = self.r53.get_change(Id=change_id)
149 if response["ChangeInfo"]["Status"] == "INSYNC":
150 return
151 time.sleep(5)
152 raise errors.PluginError(
153 "Timed out waiting for Route53 change. Current status: %s" %
154 response["ChangeInfo"]["Status"])
155
[end of certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot-dns-route53/certbot_dns_route53/__init__.py b/certbot-dns-route53/certbot_dns_route53/__init__.py
--- a/certbot-dns-route53/certbot_dns_route53/__init__.py
+++ b/certbot-dns-route53/certbot_dns_route53/__init__.py
@@ -8,17 +8,6 @@
`certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and
selecting the Wildcard tab.
-Named Arguments
----------------
-
-======================================== =====================================
-``--dns-route53-propagation-seconds`` The number of seconds to wait for DNS
- to propagate before asking the ACME
- server to verify the DNS record.
- (Default: 10)
-======================================== =====================================
-
-
Credentials
-----------
Use of this plugin requires a configuration file containing Amazon Web Sevices
diff --git a/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py b/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py
--- a/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py
+++ b/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py
@@ -3,6 +3,7 @@
import logging
import time
from typing import Any
+from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import List
@@ -12,9 +13,11 @@
from botocore.exceptions import NoCredentialsError
from acme.challenges import ChallengeResponse
+from certbot import achallenges
from certbot import errors
from certbot.achallenges import AnnotatedChallenge
from certbot.plugins import dns_common
+from certbot.util import add_deprecated_argument
logger = logging.getLogger(__name__)
@@ -44,6 +47,17 @@
def more_info(self) -> str:
return "Solve a DNS01 challenge using AWS Route53"
+ @classmethod
+ def add_parser_arguments(cls, add: Callable[..., None], # pylint: disable=arguments-differ
+ default_propagation_seconds: int = 10) -> None:
+ add_deprecated_argument(add, 'propagation-seconds', 1)
+
+ def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:
+ return (
+ 'The Certificate Authority failed to verify the DNS TXT records created by '
+ '--dns-route53. Ensure the above domains have their DNS hosted by AWS Route53.'
+ )
+
def _setup_credentials(self) -> None:
pass
|
{"golden_diff": "diff --git a/certbot-dns-route53/certbot_dns_route53/__init__.py b/certbot-dns-route53/certbot_dns_route53/__init__.py\n--- a/certbot-dns-route53/certbot_dns_route53/__init__.py\n+++ b/certbot-dns-route53/certbot_dns_route53/__init__.py\n@@ -8,17 +8,6 @@\n `certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and\n selecting the Wildcard tab.\n \n-Named Arguments\n----------------\n-\n-======================================== =====================================\n-``--dns-route53-propagation-seconds`` The number of seconds to wait for DNS\n- to propagate before asking the ACME\n- server to verify the DNS record.\n- (Default: 10)\n-======================================== =====================================\n-\n-\n Credentials\n -----------\n Use of this plugin requires a configuration file containing Amazon Web Sevices\ndiff --git a/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py b/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py\n--- a/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py\n+++ b/certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py\n@@ -3,6 +3,7 @@\n import logging\n import time\n from typing import Any\n+from typing import Callable\n from typing import DefaultDict\n from typing import Dict\n from typing import List\n@@ -12,9 +13,11 @@\n from botocore.exceptions import NoCredentialsError\n \n from acme.challenges import ChallengeResponse\n+from certbot import achallenges\n from certbot import errors\n from certbot.achallenges import AnnotatedChallenge\n from certbot.plugins import dns_common\n+from certbot.util import add_deprecated_argument\n \n logger = logging.getLogger(__name__)\n \n@@ -44,6 +47,17 @@\n def more_info(self) -> str:\n return \"Solve a DNS01 challenge using AWS Route53\"\n \n+ @classmethod\n+ def add_parser_arguments(cls, add: Callable[..., None], # pylint: disable=arguments-differ\n+ default_propagation_seconds: int = 10) -> None:\n+ add_deprecated_argument(add, 'propagation-seconds', 1)\n+\n+ def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:\n+ return (\n+ 'The Certificate Authority failed to verify the DNS TXT records created by '\n+ '--dns-route53. Ensure the above domains have their DNS hosted by AWS Route53.'\n+ )\n+\n def _setup_credentials(self) -> None:\n pass\n", "issue": "--dns-route53-propagation-seconds not respected?\nWe got a report that `--dns-route53-propagation-seconds` is not respected. Quickly looking at the code, this seems like a real problem because that flag is added by the common DNS authenticator class in Certbot and used in the `perform` method which the route53 plugin overrides without calling the parent class.\r\n\r\nIt looks like the route53 plugin has some additional logic to wait for DNS changes to propagate, but I think we should either respect that flag or deprecate it and remove it from the plugin in the future.\n", "before_files": [{"content": "\"\"\"\nThe `~certbot_dns_route53.dns_route53` plugin automates the process of\ncompleting a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and\nsubsequently removing, TXT records using the Amazon Web Services Route 53 API.\n\n.. note::\n The plugin is not installed by default. 
It can be installed by heading to\n `certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and\n selecting the Wildcard tab.\n\nNamed Arguments\n---------------\n\n======================================== =====================================\n``--dns-route53-propagation-seconds`` The number of seconds to wait for DNS\n to propagate before asking the ACME\n server to verify the DNS record.\n (Default: 10)\n======================================== =====================================\n\n\nCredentials\n-----------\nUse of this plugin requires a configuration file containing Amazon Web Sevices\nAPI credentials for an account with the following permissions:\n\n* ``route53:ListHostedZones``\n* ``route53:GetChange``\n* ``route53:ChangeResourceRecordSets``\n\nThese permissions can be captured in an AWS policy like the one below. Amazon\nprovides `information about managing access <https://docs.aws.amazon.com/Route53\n/latest/DeveloperGuide/access-control-overview.html>`_ and `information about\nthe required permissions <https://docs.aws.amazon.com/Route53/latest\n/DeveloperGuide/r53-api-permissions-ref.html>`_\n\n.. code-block:: json\n :name: sample-aws-policy.json\n :caption: Example AWS policy file:\n\n {\n \"Version\": \"2012-10-17\",\n \"Id\": \"certbot-dns-route53 sample policy\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"route53:ListHostedZones\",\n \"route53:GetChange\"\n ],\n \"Resource\": [\n \"*\"\n ]\n },\n {\n \"Effect\" : \"Allow\",\n \"Action\" : [\n \"route53:ChangeResourceRecordSets\"\n ],\n \"Resource\" : [\n \"arn:aws:route53:::hostedzone/YOURHOSTEDZONEID\"\n ]\n }\n ]\n }\n\nThe `access keys <https://docs.aws.amazon.com/general/latest/gr\n/aws-sec-cred-types.html#access-keys-and-secret-access-keys>`_ for an account\nwith these permissions must be supplied in one of the following ways, which are\ndiscussed in more detail in the Boto3 library's documentation about `configuring\ncredentials <https://boto3.readthedocs.io/en/latest/guide/configuration.html\n#best-practices-for-configuring-credentials>`_.\n\n* Using the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment\n variables.\n* Using a credentials configuration file at the default location,\n ``~/.aws/config``.\n* Using a credentials configuration file at a path supplied using the\n ``AWS_CONFIG_FILE`` environment variable.\n\n.. code-block:: ini\n :name: config.ini\n :caption: Example credentials config file:\n\n [default]\n aws_access_key_id=AKIAIOSFODNN7EXAMPLE\n aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n\n.. caution::\n You should protect these API credentials as you would a password. Users who\n can read this file can use these credentials to issue some types of API calls\n on your behalf, limited by the permissions assigned to the account. Users who\n can cause Certbot to run using these credentials can complete a ``dns-01``\n challenge to acquire new certificates or revoke existing certificates for\n domains these credentials are authorized to manage.\n\n\nExamples\n--------\n.. code-block:: bash\n :caption: To acquire a certificate for ``example.com``\n\n certbot certonly \\\\\n --dns-route53 \\\\\n -d example.com\n\n.. code-block:: bash\n :caption: To acquire a single certificate for both ``example.com`` and\n ``www.example.com``\n\n certbot certonly \\\\\n --dns-route53 \\\\\n -d example.com \\\\\n -d www.example.com\n\n.. 
code-block:: bash\n :caption: To acquire a certificate for ``example.com``, waiting 30 seconds\n for DNS propagation\n\n certbot certonly \\\\\n --dns-route53 \\\\\n --dns-route53-propagation-seconds 30 \\\\\n -d example.com\n\"\"\"\n", "path": "certbot-dns-route53/certbot_dns_route53/__init__.py"}, {"content": "\"\"\"Certbot Route53 authenticator plugin.\"\"\"\nimport collections\nimport logging\nimport time\nfrom typing import Any\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import List\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.exceptions import NoCredentialsError\n\nfrom acme.challenges import ChallengeResponse\nfrom certbot import errors\nfrom certbot.achallenges import AnnotatedChallenge\nfrom certbot.plugins import dns_common\n\nlogger = logging.getLogger(__name__)\n\nINSTRUCTIONS = (\n \"To use certbot-dns-route53, configure credentials as described at \"\n \"https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials \" # pylint: disable=line-too-long\n \"and add the necessary permissions for Route53 access.\")\n\n\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"Route53 Authenticator\n\n This authenticator solves a DNS01 challenge by uploading the answer to AWS\n Route53.\n \"\"\"\n\n description = (\"Obtain certificates using a DNS TXT record (if you are using AWS Route53 for \"\n \"DNS).\")\n ttl = 10\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.r53 = boto3.client(\"route53\")\n self._resource_records: DefaultDict[str, List[Dict[str, str]]] = \\\n collections.defaultdict(list)\n\n def more_info(self) -> str:\n return \"Solve a DNS01 challenge using AWS Route53\"\n\n def _setup_credentials(self) -> None:\n pass\n\n def _perform(self, domain: str, validation_name: str, validation: str) -> None:\n pass\n\n def perform(self, achalls: List[AnnotatedChallenge]) -> List[ChallengeResponse]:\n self._attempt_cleanup = True\n\n try:\n change_ids = [\n self._change_txt_record(\"UPSERT\",\n achall.validation_domain_name(achall.domain),\n achall.validation(achall.account_key))\n for achall in achalls\n ]\n\n for change_id in change_ids:\n self._wait_for_change(change_id)\n except (NoCredentialsError, ClientError) as e:\n logger.debug('Encountered error during perform: %s', e, exc_info=True)\n raise errors.PluginError(\"\\n\".join([str(e), INSTRUCTIONS]))\n return [achall.response(achall.account_key) for achall in achalls]\n\n def _cleanup(self, domain: str, validation_name: str, validation: str) -> None:\n try:\n self._change_txt_record(\"DELETE\", validation_name, validation)\n except (NoCredentialsError, ClientError) as e:\n logger.debug('Encountered error during cleanup: %s', e, exc_info=True)\n\n def _find_zone_id_for_domain(self, domain: str) -> str:\n \"\"\"Find the zone id responsible a given FQDN.\n\n That is, the id for the zone whose name is the longest parent of the\n domain.\n \"\"\"\n paginator = self.r53.get_paginator(\"list_hosted_zones\")\n zones = []\n target_labels = domain.rstrip(\".\").split(\".\")\n for page in paginator.paginate():\n for zone in page[\"HostedZones\"]:\n if zone[\"Config\"][\"PrivateZone\"]:\n continue\n\n candidate_labels = zone[\"Name\"].rstrip(\".\").split(\".\")\n if candidate_labels == target_labels[-len(candidate_labels):]:\n zones.append((zone[\"Name\"], zone[\"Id\"]))\n\n if not zones:\n raise errors.PluginError(\n \"Unable to find a Route53 hosted zone for 
{0}\".format(domain)\n )\n\n # Order the zones that are suffixes for our desired to domain by\n # length, this puts them in an order like:\n # [\"foo.bar.baz.com\", \"bar.baz.com\", \"baz.com\", \"com\"]\n # And then we choose the first one, which will be the most specific.\n zones.sort(key=lambda z: len(z[0]), reverse=True)\n return zones[0][1]\n\n def _change_txt_record(self, action: str, validation_domain_name: str, validation: str) -> str:\n zone_id = self._find_zone_id_for_domain(validation_domain_name)\n\n rrecords = self._resource_records[validation_domain_name]\n challenge = {\"Value\": '\"{0}\"'.format(validation)}\n if action == \"DELETE\":\n # Remove the record being deleted from the list of tracked records\n rrecords.remove(challenge)\n if rrecords:\n # Need to update instead, as we're not deleting the rrset\n action = \"UPSERT\"\n else:\n # Create a new list containing the record to use with DELETE\n rrecords = [challenge]\n else:\n rrecords.append(challenge)\n\n response = self.r53.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n \"Comment\": \"certbot-dns-route53 certificate validation \" + action,\n \"Changes\": [\n {\n \"Action\": action,\n \"ResourceRecordSet\": {\n \"Name\": validation_domain_name,\n \"Type\": \"TXT\",\n \"TTL\": self.ttl,\n \"ResourceRecords\": rrecords,\n }\n }\n ]\n }\n )\n return response[\"ChangeInfo\"][\"Id\"]\n\n def _wait_for_change(self, change_id: str) -> None:\n \"\"\"Wait for a change to be propagated to all Route53 DNS servers.\n https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html\n \"\"\"\n for unused_n in range(0, 120):\n response = self.r53.get_change(Id=change_id)\n if response[\"ChangeInfo\"][\"Status\"] == \"INSYNC\":\n return\n time.sleep(5)\n raise errors.PluginError(\n \"Timed out waiting for Route53 change. Current status: %s\" %\n response[\"ChangeInfo\"][\"Status\"])\n", "path": "certbot-dns-route53/certbot_dns_route53/_internal/dns_route53.py"}]}
| 3,686 | 639 |
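The golden diff above resolves the report not by honoring `--dns-route53-propagation-seconds` but by deprecating it: the plugin overrides `add_parser_arguments` so the flag is still accepted but ignored, and it gains an `auth_hint`. A condensed sketch of that shape is below; it is not a complete plugin, and the other methods of the original `Authenticator` class are omitted unchanged.

```python
# Condensed sketch of the route53 authenticator changes in the diff above;
# only the overridden pieces are shown.
from typing import Callable, List

from certbot import achallenges
from certbot.plugins import dns_common
from certbot.util import add_deprecated_argument


class Authenticator(dns_common.DNSAuthenticator):
    """Route53 Authenticator (abridged)."""

    @classmethod
    def add_parser_arguments(cls, add: Callable[..., None],
                             default_propagation_seconds: int = 10) -> None:
        # Keep accepting --dns-route53-propagation-seconds so existing command
        # lines do not break, but register it as deprecated and ignored.
        add_deprecated_argument(add, 'propagation-seconds', 1)

    def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:
        return ('The Certificate Authority failed to verify the DNS TXT records '
                'created by --dns-route53. Ensure the above domains have their '
                'DNS hosted by AWS Route53.')
```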
gh_patches_debug_5623
|
rasdani/github-patches
|
git_diff
|
spack__spack-3415
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bison doesn't find m4 at run time
While building `flex`, I had `bison` fail because it could not execute `m4`. The reason was that I had uninstalled the `m4` package (via Spack) which `bison` installed as its build dependency. Then, `bison` failed since this `m4` executable did not exist any more.
I think `m4` needs to be a run-time dependency of `bison` as well.
</issue>
<code>
[start of var/spack/repos/builtin/packages/autoconf/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Autoconf(AutotoolsPackage):
29 """Autoconf -- system configuration part of autotools"""
30
31 homepage = 'https://www.gnu.org/software/autoconf/'
32 url = 'http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz'
33
34 version('2.69', '82d05e03b93e45f5a39b828dc9c6c29b')
35 version('2.62', '6c1f3b3734999035d77da5024aab4fbd')
36 version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')
37 version('2.13', '9de56d4a161a723228220b0f425dc711')
38
39 depends_on('[email protected]:', type='build')
40
41 build_directory = 'spack-build'
42
43 def _make_executable(self, name):
44 return Executable(join_path(self.prefix.bin, name))
45
46 def setup_dependent_package(self, module, dependent_spec):
47 # Autoconf is very likely to be a build dependency,
48 # so we add the tools it provides to the dependent module
49 executables = ['autoconf',
50 'autoheader',
51 'autom4te',
52 'autoreconf',
53 'autoscan',
54 'autoupdate',
55 'ifnames']
56 for name in executables:
57 setattr(module, name, self._make_executable(name))
58
[end of var/spack/repos/builtin/packages/autoconf/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/autoconf/package.py b/var/spack/repos/builtin/packages/autoconf/package.py
--- a/var/spack/repos/builtin/packages/autoconf/package.py
+++ b/var/spack/repos/builtin/packages/autoconf/package.py
@@ -36,7 +36,9 @@
version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')
version('2.13', '9de56d4a161a723228220b0f425dc711')
- depends_on('[email protected]:', type='build')
+ # Note: m4 is not a pure build-time dependency of autoconf. m4 is
+ # needed when autoconf runs, not only when autoconf is built.
+ depends_on('[email protected]:', type=('build', 'run'))
build_directory = 'spack-build'
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/autoconf/package.py b/var/spack/repos/builtin/packages/autoconf/package.py\n--- a/var/spack/repos/builtin/packages/autoconf/package.py\n+++ b/var/spack/repos/builtin/packages/autoconf/package.py\n@@ -36,7 +36,9 @@\n version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')\n version('2.13', '9de56d4a161a723228220b0f425dc711')\n \n- depends_on('[email protected]:', type='build')\n+ # Note: m4 is not a pure build-time dependency of autoconf. m4 is\n+ # needed when autoconf runs, not only when autoconf is built.\n+ depends_on('[email protected]:', type=('build', 'run'))\n \n build_directory = 'spack-build'\n", "issue": "bison doesn't find m4 at run time\nWhile building `flex`, I had `bison` fail because it could not execute `m4`. The reason was that I had uninstalled the `m4` package (via Spack) which `bison` installed as its build dependency. Then, `bison` failed since this `m4` executable did not exist any more.\r\n\r\nI think `m4` needs to be a run-time dependency of `bison` as well.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Autoconf(AutotoolsPackage):\n \"\"\"Autoconf -- system configuration part of autotools\"\"\"\n\n homepage = 'https://www.gnu.org/software/autoconf/'\n url = 'http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz'\n\n version('2.69', '82d05e03b93e45f5a39b828dc9c6c29b')\n version('2.62', '6c1f3b3734999035d77da5024aab4fbd')\n version('2.59', 'd4d45eaa1769d45e59dcb131a4af17a0')\n version('2.13', '9de56d4a161a723228220b0f425dc711')\n\n depends_on('[email protected]:', type='build')\n\n build_directory = 'spack-build'\n\n def _make_executable(self, name):\n return Executable(join_path(self.prefix.bin, name))\n\n def setup_dependent_package(self, module, dependent_spec):\n # Autoconf is very likely to be a build dependency,\n # so we add the tools it provides to the dependent module\n executables = ['autoconf',\n 'autoheader',\n 'autom4te',\n 'autoreconf',\n 'autoscan',\n 'autoupdate',\n 'ifnames']\n for name in executables:\n setattr(module, name, self._make_executable(name))\n", "path": "var/spack/repos/builtin/packages/autoconf/package.py"}]}
| 1,420 | 237 |
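The fix above is a one-line dependency-type change in Spack's autoconf package. For readers unfamiliar with `depends_on` types, a trimmed sketch of the resulting declaration is shown below; other versions and helper methods from the full package file are omitted.

```python
# Trimmed sketch of the autoconf package after the patch above.
from spack import *


class Autoconf(AutotoolsPackage):
    """Autoconf -- system configuration part of autotools"""

    homepage = 'https://www.gnu.org/software/autoconf/'
    url = 'http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz'

    # m4 is not a pure build-time dependency of autoconf: it is needed when
    # autoconf runs, not only when autoconf is built, so both types are declared.
    depends_on('[email protected]:', type=('build', 'run'))
```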
gh_patches_debug_13432
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-936
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plots won't show up
Hi everyone,
using nilearn on OS X El Capitan, no plots show up when I run the example scripts such as plot_demo_glass_brain.py. pip and IPython are linked to the same folders, and matplotlib on its own does show plots. All dependencies are installed and up to date. I don't really know how to fix this.
</issue>
<code>
[start of nilearn/plotting/__init__.py]
1 """
2 Plotting code for nilearn
3 """
4 # Authors: Chris Filo Gorgolewski, Gael Varoquaux
5
6 ###############################################################################
7 # Make sure that we don't get DISPLAY problems when running without X on
8 # unices
9 def _set_mpl_backend():
10 try:
11 # We are doing local imports here to avoid poluting our namespace
12 import matplotlib
13 import os
14 # Set the backend to a non-interactive one for unices without X
15 if os.name == 'posix' and 'DISPLAY' not in os.environ:
16 matplotlib.use('Agg')
17 except ImportError:
18 from .._utils.testing import skip_if_running_nose
19 # No need to fail when running tests
20 skip_if_running_nose('matplotlib not installed')
21 raise
22 else:
23 from ..version import (_import_module_with_version_check,
24 OPTIONAL_MATPLOTLIB_MIN_VERSION)
25 # When matplotlib was successfully imported we need to check
26 # that the version is greater that the minimum required one
27 _import_module_with_version_check('matplotlib',
28 OPTIONAL_MATPLOTLIB_MIN_VERSION)
29
30 _set_mpl_backend()
31
32 ###############################################################################
33
34 from . import cm
35 from .img_plotting import plot_img, plot_anat, plot_epi, \
36 plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \
37 plot_prob_atlas, show
38 from .find_cuts import find_xyz_cut_coords, find_cut_slices
39
40 __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
41 'plot_roi', 'plot_stat_map', 'plot_glass_brain',
42 'plot_connectome', 'plot_prob_atlas',
43 'find_xyz_cut_coords', 'find_cut_slices',
44 'show']
45
[end of nilearn/plotting/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py
--- a/nilearn/plotting/__init__.py
+++ b/nilearn/plotting/__init__.py
@@ -11,8 +11,12 @@
# We are doing local imports here to avoid poluting our namespace
import matplotlib
import os
+ import sys
# Set the backend to a non-interactive one for unices without X
- if os.name == 'posix' and 'DISPLAY' not in os.environ:
+ if (os.name == 'posix' and 'DISPLAY' not in os.environ
+ and not (sys.platform == 'darwin'
+ and matplotlib.get_backend() == 'MacOSX'
+ )):
matplotlib.use('Agg')
except ImportError:
from .._utils.testing import skip_if_running_nose
|
{"golden_diff": "diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py\n--- a/nilearn/plotting/__init__.py\n+++ b/nilearn/plotting/__init__.py\n@@ -11,8 +11,12 @@\n # We are doing local imports here to avoid poluting our namespace\n import matplotlib\n import os\n+ import sys\n # Set the backend to a non-interactive one for unices without X\n- if os.name == 'posix' and 'DISPLAY' not in os.environ:\n+ if (os.name == 'posix' and 'DISPLAY' not in os.environ\n+ and not (sys.platform == 'darwin'\n+ and matplotlib.get_backend() == 'MacOSX'\n+ )):\n matplotlib.use('Agg')\n except ImportError:\n from .._utils.testing import skip_if_running_nose\n", "issue": "Plots won't show up\nHi everyone,\n\nusing nilearn on OSX El Capitan, when executing the example scripts like plot_demo_glass_brain.py no plots will show up. PiP and iPython linked to the same folders, matplotlib alone does show plots. All dependencies are up-to-date and installed. Don't really know how to fix this.\n\n", "before_files": [{"content": "\"\"\"\nPlotting code for nilearn\n\"\"\"\n# Authors: Chris Filo Gorgolewski, Gael Varoquaux\n\n###############################################################################\n# Make sure that we don't get DISPLAY problems when running without X on\n# unices\ndef _set_mpl_backend():\n try:\n # We are doing local imports here to avoid poluting our namespace\n import matplotlib\n import os\n # Set the backend to a non-interactive one for unices without X\n if os.name == 'posix' and 'DISPLAY' not in os.environ:\n matplotlib.use('Agg')\n except ImportError:\n from .._utils.testing import skip_if_running_nose\n # No need to fail when running tests\n skip_if_running_nose('matplotlib not installed')\n raise\n else:\n from ..version import (_import_module_with_version_check,\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n # When matplotlib was successfully imported we need to check\n # that the version is greater that the minimum required one\n _import_module_with_version_check('matplotlib',\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n\n_set_mpl_backend()\n\n###############################################################################\n\nfrom . import cm\nfrom .img_plotting import plot_img, plot_anat, plot_epi, \\\n plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \\\n plot_prob_atlas, show\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices\n\n__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',\n 'plot_roi', 'plot_stat_map', 'plot_glass_brain',\n 'plot_connectome', 'plot_prob_atlas',\n 'find_xyz_cut_coords', 'find_cut_slices',\n 'show']\n", "path": "nilearn/plotting/__init__.py"}]}
| 1,077 | 199 |
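The patch above fixes the macOS case by not forcing the `Agg` backend when matplotlib has already selected its native `MacOSX` backend. The guard condition, pulled out as a standalone sketch of the `_set_mpl_backend` logic:

```python
# Standalone sketch of the backend guard introduced by the diff above.
import os
import sys

import matplotlib


def _set_mpl_backend():
    # Force the non-interactive Agg backend only on POSIX systems without a
    # display, and leave macOS alone when the native MacOSX backend is active
    # so interactive plots can still appear.
    if (os.name == 'posix' and 'DISPLAY' not in os.environ
            and not (sys.platform == 'darwin'
                     and matplotlib.get_backend() == 'MacOSX')):
        matplotlib.use('Agg')
```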
gh_patches_debug_41715
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-4887
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prefer wheels during plugin install
Pants has sprouted a dependency on a package that is most easily installed via a `whl` on pypi (`openssl` for `requests[security]`).
But currently the plugin installation infrastructure does not enable usage of `WheelPackage` due to a defensive avoidance of assuming that `wheel` is installed in an environment that pants is being loaded from:
https://github.com/pantsbuild/pants/blob/e0d5108ff75a41421a11321c73c817332f0a1c86/src/python/pants/init/plugin_resolver.py#L79-L82
As far as @kwlzn or I can tell, `wheel` should always be on the PYTHONPATH, as pants itself depends on it: it's not clear that it actually needs to be "installed" on the PATH as well.
cc @jsirois for any context he might have on that comment.
</issue>
<code>
[start of src/python/pants/init/plugin_resolver.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
6 unicode_literals, with_statement)
7
8 import hashlib
9 import logging
10 import os
11
12 from pex import resolver
13 from pex.base import requirement_is_exact
14 from pex.package import EggPackage, SourcePackage
15 from pkg_resources import working_set as global_working_set
16 from pkg_resources import Requirement
17
18 from pants.option.global_options import GlobalOptionsRegistrar
19 from pants.python.python_repos import PythonRepos
20 from pants.subsystem.subsystem import Subsystem
21 from pants.util.dirutil import safe_open
22 from pants.util.memo import memoized_property
23 from pants.version import PANTS_SEMVER
24
25
26 logger = logging.getLogger(__name__)
27
28
29 class PluginResolver(object):
30 def __init__(self, options_bootstrapper):
31 self._options_bootstrapper = options_bootstrapper
32
33 bootstrap_options = self._options_bootstrapper.get_bootstrap_options().for_global_scope()
34 self._plugin_requirements = bootstrap_options.plugins
35 self._plugin_cache_dir = bootstrap_options.plugin_cache_dir
36
37 def resolve(self, working_set=None):
38 """Resolves any configured plugins and adds them to the global working set.
39
40 :param working_set: The working set to add the resolved plugins to instead of the global
41 working set (for testing).
42 :type: :class:`pkg_resources.WorkingSet`
43 """
44 working_set = working_set or global_working_set
45 if self._plugin_requirements:
46 for plugin_location in self._resolve_plugin_locations():
47 working_set.add_entry(plugin_location)
48 return working_set
49
50 def _resolve_plugin_locations(self):
51 # We jump through some hoops here to avoid a live resolve if possible for purposes of speed.
52 # Even with a local resolve cache fully up to date, running a resolve to activate a plugin
53 # takes ~250ms whereas loading from a pre-cached list takes ~50ms.
54 if all(requirement_is_exact(Requirement.parse(req)) for req in self._plugin_requirements):
55 return self._resolve_exact_plugin_locations()
56 else:
57 return (plugin.location for plugin in self._resolve_plugins())
58
59 def _resolve_exact_plugin_locations(self):
60 hasher = hashlib.sha1()
61 for req in sorted(self._plugin_requirements):
62 hasher.update(req)
63 resolve_hash = hasher.hexdigest()
64 resolved_plugins_list = os.path.join(self.plugin_cache_dir,
65 'plugins-{}.txt'.format(resolve_hash))
66
67 if not os.path.exists(resolved_plugins_list):
68 tmp_plugins_list = resolved_plugins_list + '~'
69 with safe_open(tmp_plugins_list, 'w') as fp:
70 for plugin in self._resolve_plugins():
71 fp.write(plugin.location)
72 fp.write('\n')
73 os.rename(tmp_plugins_list, resolved_plugins_list)
74 with open(resolved_plugins_list) as fp:
75 for plugin_location in fp:
76 yield plugin_location.strip()
77
78 def _resolve_plugins(self):
79 # When bootstrapping plugins without the full pants python backend machinery in-play, we are not
80 # guaranteed a properly initialized interpreter with wheel support so we enforce eggs only for
81 # bdists with this custom precedence.
82 precedence = (EggPackage, SourcePackage)
83 logger.info('Resolving new plugins...:\n {}'.format('\n '.join(self._plugin_requirements)))
84 return resolver.resolve(self._plugin_requirements,
85 fetchers=self._python_repos.get_fetchers(),
86 context=self._python_repos.get_network_context(),
87 precedence=precedence,
88 cache=self.plugin_cache_dir,
89 cache_ttl=10 * 365 * 24 * 60 * 60, # Effectively never expire.
90 allow_prereleases=PANTS_SEMVER.is_prerelease)
91
92 @memoized_property
93 def plugin_cache_dir(self):
94 """The path of the directory pants plugins bdists are cached in."""
95 return self._plugin_cache_dir
96
97 @memoized_property
98 def _python_repos(self):
99 return self._create_global_subsystem(PythonRepos)
100
101 def _create_global_subsystem(self, subsystem_type):
102 options_scope = subsystem_type.options_scope
103 return subsystem_type(options_scope, self._options.for_scope(options_scope))
104
105 @memoized_property
106 def _options(self):
107 # NB: The PluginResolver runs very early in the pants startup sequence before the standard
108 # Subsystem facility is wired up. As a result PluginResolver is not itself a Subsystem with
109 # PythonRepos as a dependency. Instead it does the minimum possible work to hand-roll
110 # bootstrapping of the Subsystem it needs.
111 subsystems = Subsystem.closure([PythonRepos])
112 known_scope_infos = [subsystem.get_scope_info() for subsystem in subsystems]
113 options = self._options_bootstrapper.get_full_options(known_scope_infos)
114
115 # Ignore command line flags since we'd blow up on any we don't understand (most of them).
116 # If someone wants to bootstrap plugins in a one-off custom way they'll need to use env vars
117 # or a --pants-config-files pointing to a custom pants.ini snippet.
118 defaulted_only_options = options.drop_flag_values()
119
120 GlobalOptionsRegistrar.register_options_on_scope(defaulted_only_options)
121 for subsystem in subsystems:
122 subsystem.register_options_on_scope(defaulted_only_options)
123 return defaulted_only_options
124
[end of src/python/pants/init/plugin_resolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/init/plugin_resolver.py b/src/python/pants/init/plugin_resolver.py
--- a/src/python/pants/init/plugin_resolver.py
+++ b/src/python/pants/init/plugin_resolver.py
@@ -8,17 +8,18 @@
import hashlib
import logging
import os
+import site
from pex import resolver
from pex.base import requirement_is_exact
-from pex.package import EggPackage, SourcePackage
from pkg_resources import working_set as global_working_set
from pkg_resources import Requirement
+from wheel.install import WheelFile
from pants.option.global_options import GlobalOptionsRegistrar
from pants.python.python_repos import PythonRepos
from pants.subsystem.subsystem import Subsystem
-from pants.util.dirutil import safe_open
+from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.memo import memoized_property
from pants.version import PANTS_SEMVER
@@ -27,6 +28,26 @@
class PluginResolver(object):
+ @staticmethod
+ def _is_wheel(path):
+ return os.path.isfile(path) and path.endswith('.whl')
+
+ @staticmethod
+ def _activate_wheel(wheel_path):
+ install_dir = '{}-install'.format(wheel_path)
+ safe_mkdir(install_dir, clean=True)
+ WheelFile(wheel_path).install(force=True,
+ overrides={
+ 'purelib': install_dir,
+ 'headers': os.path.join(install_dir, 'headers'),
+ 'scripts': os.path.join(install_dir, 'bin'),
+ 'platlib': install_dir,
+ 'data': install_dir
+ })
+ # Activate any .pth files installed above.
+ site.addsitedir(install_dir)
+ return install_dir
+
def __init__(self, options_bootstrapper):
self._options_bootstrapper = options_bootstrapper
@@ -44,6 +65,8 @@
working_set = working_set or global_working_set
if self._plugin_requirements:
for plugin_location in self._resolve_plugin_locations():
+ if self._is_wheel(plugin_location):
+ plugin_location = self._activate_wheel(plugin_location)
working_set.add_entry(plugin_location)
return working_set
@@ -76,15 +99,10 @@
yield plugin_location.strip()
def _resolve_plugins(self):
- # When bootstrapping plugins without the full pants python backend machinery in-play, we are not
- # guaranteed a properly initialized interpreter with wheel support so we enforce eggs only for
- # bdists with this custom precedence.
- precedence = (EggPackage, SourcePackage)
logger.info('Resolving new plugins...:\n {}'.format('\n '.join(self._plugin_requirements)))
return resolver.resolve(self._plugin_requirements,
fetchers=self._python_repos.get_fetchers(),
context=self._python_repos.get_network_context(),
- precedence=precedence,
cache=self.plugin_cache_dir,
cache_ttl=10 * 365 * 24 * 60 * 60, # Effectively never expire.
allow_prereleases=PANTS_SEMVER.is_prerelease)
|
{"golden_diff": "diff --git a/src/python/pants/init/plugin_resolver.py b/src/python/pants/init/plugin_resolver.py\n--- a/src/python/pants/init/plugin_resolver.py\n+++ b/src/python/pants/init/plugin_resolver.py\n@@ -8,17 +8,18 @@\n import hashlib\n import logging\n import os\n+import site\n \n from pex import resolver\n from pex.base import requirement_is_exact\n-from pex.package import EggPackage, SourcePackage\n from pkg_resources import working_set as global_working_set\n from pkg_resources import Requirement\n+from wheel.install import WheelFile\n \n from pants.option.global_options import GlobalOptionsRegistrar\n from pants.python.python_repos import PythonRepos\n from pants.subsystem.subsystem import Subsystem\n-from pants.util.dirutil import safe_open\n+from pants.util.dirutil import safe_mkdir, safe_open\n from pants.util.memo import memoized_property\n from pants.version import PANTS_SEMVER\n \n@@ -27,6 +28,26 @@\n \n \n class PluginResolver(object):\n+ @staticmethod\n+ def _is_wheel(path):\n+ return os.path.isfile(path) and path.endswith('.whl')\n+\n+ @staticmethod\n+ def _activate_wheel(wheel_path):\n+ install_dir = '{}-install'.format(wheel_path)\n+ safe_mkdir(install_dir, clean=True)\n+ WheelFile(wheel_path).install(force=True,\n+ overrides={\n+ 'purelib': install_dir,\n+ 'headers': os.path.join(install_dir, 'headers'),\n+ 'scripts': os.path.join(install_dir, 'bin'),\n+ 'platlib': install_dir,\n+ 'data': install_dir\n+ })\n+ # Activate any .pth files installed above.\n+ site.addsitedir(install_dir)\n+ return install_dir\n+\n def __init__(self, options_bootstrapper):\n self._options_bootstrapper = options_bootstrapper\n \n@@ -44,6 +65,8 @@\n working_set = working_set or global_working_set\n if self._plugin_requirements:\n for plugin_location in self._resolve_plugin_locations():\n+ if self._is_wheel(plugin_location):\n+ plugin_location = self._activate_wheel(plugin_location)\n working_set.add_entry(plugin_location)\n return working_set\n \n@@ -76,15 +99,10 @@\n yield plugin_location.strip()\n \n def _resolve_plugins(self):\n- # When bootstrapping plugins without the full pants python backend machinery in-play, we are not\n- # guaranteed a properly initialized interpreter with wheel support so we enforce eggs only for\n- # bdists with this custom precedence.\n- precedence = (EggPackage, SourcePackage)\n logger.info('Resolving new plugins...:\\n {}'.format('\\n '.join(self._plugin_requirements)))\n return resolver.resolve(self._plugin_requirements,\n fetchers=self._python_repos.get_fetchers(),\n context=self._python_repos.get_network_context(),\n- precedence=precedence,\n cache=self.plugin_cache_dir,\n cache_ttl=10 * 365 * 24 * 60 * 60, # Effectively never expire.\n allow_prereleases=PANTS_SEMVER.is_prerelease)\n", "issue": "Prefer wheels during plugin install\nPants has sprouted a dependency on a package that is most easily installed via a `whl` on pypi (`openssl` for `requests[security]`).\r\n\r\nBut currently the plugin installation infrastructure does not enable usage of `WheelPackage` due to a defensive avoidance of assuming that `wheel` is installed in an environment that pants is being loaded from:\r\nhttps://github.com/pantsbuild/pants/blob/e0d5108ff75a41421a11321c73c817332f0a1c86/src/python/pants/init/plugin_resolver.py#L79-L82\r\n\r\nAs far as @kwlzn or I can tell, `wheel` should always be on the PYTHONPATH, as pants itself depends on it: it's not clear that it actually needs to be \"installed\" on the PATH as well.\r\n\r\ncc @jsirois for any context he might have on that 
comment.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport hashlib\nimport logging\nimport os\n\nfrom pex import resolver\nfrom pex.base import requirement_is_exact\nfrom pex.package import EggPackage, SourcePackage\nfrom pkg_resources import working_set as global_working_set\nfrom pkg_resources import Requirement\n\nfrom pants.option.global_options import GlobalOptionsRegistrar\nfrom pants.python.python_repos import PythonRepos\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.dirutil import safe_open\nfrom pants.util.memo import memoized_property\nfrom pants.version import PANTS_SEMVER\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PluginResolver(object):\n def __init__(self, options_bootstrapper):\n self._options_bootstrapper = options_bootstrapper\n\n bootstrap_options = self._options_bootstrapper.get_bootstrap_options().for_global_scope()\n self._plugin_requirements = bootstrap_options.plugins\n self._plugin_cache_dir = bootstrap_options.plugin_cache_dir\n\n def resolve(self, working_set=None):\n \"\"\"Resolves any configured plugins and adds them to the global working set.\n\n :param working_set: The working set to add the resolved plugins to instead of the global\n working set (for testing).\n :type: :class:`pkg_resources.WorkingSet`\n \"\"\"\n working_set = working_set or global_working_set\n if self._plugin_requirements:\n for plugin_location in self._resolve_plugin_locations():\n working_set.add_entry(plugin_location)\n return working_set\n\n def _resolve_plugin_locations(self):\n # We jump through some hoops here to avoid a live resolve if possible for purposes of speed.\n # Even with a local resolve cache fully up to date, running a resolve to activate a plugin\n # takes ~250ms whereas loading from a pre-cached list takes ~50ms.\n if all(requirement_is_exact(Requirement.parse(req)) for req in self._plugin_requirements):\n return self._resolve_exact_plugin_locations()\n else:\n return (plugin.location for plugin in self._resolve_plugins())\n\n def _resolve_exact_plugin_locations(self):\n hasher = hashlib.sha1()\n for req in sorted(self._plugin_requirements):\n hasher.update(req)\n resolve_hash = hasher.hexdigest()\n resolved_plugins_list = os.path.join(self.plugin_cache_dir,\n 'plugins-{}.txt'.format(resolve_hash))\n\n if not os.path.exists(resolved_plugins_list):\n tmp_plugins_list = resolved_plugins_list + '~'\n with safe_open(tmp_plugins_list, 'w') as fp:\n for plugin in self._resolve_plugins():\n fp.write(plugin.location)\n fp.write('\\n')\n os.rename(tmp_plugins_list, resolved_plugins_list)\n with open(resolved_plugins_list) as fp:\n for plugin_location in fp:\n yield plugin_location.strip()\n\n def _resolve_plugins(self):\n # When bootstrapping plugins without the full pants python backend machinery in-play, we are not\n # guaranteed a properly initialized interpreter with wheel support so we enforce eggs only for\n # bdists with this custom precedence.\n precedence = (EggPackage, SourcePackage)\n logger.info('Resolving new plugins...:\\n {}'.format('\\n '.join(self._plugin_requirements)))\n return resolver.resolve(self._plugin_requirements,\n fetchers=self._python_repos.get_fetchers(),\n context=self._python_repos.get_network_context(),\n precedence=precedence,\n 
cache=self.plugin_cache_dir,\n cache_ttl=10 * 365 * 24 * 60 * 60, # Effectively never expire.\n allow_prereleases=PANTS_SEMVER.is_prerelease)\n\n @memoized_property\n def plugin_cache_dir(self):\n \"\"\"The path of the directory pants plugins bdists are cached in.\"\"\"\n return self._plugin_cache_dir\n\n @memoized_property\n def _python_repos(self):\n return self._create_global_subsystem(PythonRepos)\n\n def _create_global_subsystem(self, subsystem_type):\n options_scope = subsystem_type.options_scope\n return subsystem_type(options_scope, self._options.for_scope(options_scope))\n\n @memoized_property\n def _options(self):\n # NB: The PluginResolver runs very early in the pants startup sequence before the standard\n # Subsystem facility is wired up. As a result PluginResolver is not itself a Subsystem with\n # PythonRepos as a dependency. Instead it does the minimum possible work to hand-roll\n # bootstrapping of the Subsystem it needs.\n subsystems = Subsystem.closure([PythonRepos])\n known_scope_infos = [subsystem.get_scope_info() for subsystem in subsystems]\n options = self._options_bootstrapper.get_full_options(known_scope_infos)\n\n # Ignore command line flags since we'd blow up on any we don't understand (most of them).\n # If someone wants to bootstrap plugins in a one-off custom way they'll need to use env vars\n # or a --pants-config-files pointing to a custom pants.ini snippet.\n defaulted_only_options = options.drop_flag_values()\n\n GlobalOptionsRegistrar.register_options_on_scope(defaulted_only_options)\n for subsystem in subsystems:\n subsystem.register_options_on_scope(defaulted_only_options)\n return defaulted_only_options\n", "path": "src/python/pants/init/plugin_resolver.py"}]}
| 2,189 | 693 |
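The patch above drops the eggs-only precedence and instead teaches the resolver to activate resolved `.whl` files directly. A simplified sketch of that wheel-activation step follows; it is shown as a free function named `activate_wheel` rather than the patch's `_activate_wheel` method, and `os.makedirs` stands in for Pants' `safe_mkdir`.

```python
# Simplified sketch of the wheel activation added in the diff above; the
# overrides mirror the patch, error handling is omitted.
import os
import site

from wheel.install import WheelFile  # pre-0.32 wheel API, as used by the patch


def activate_wheel(wheel_path):
    install_dir = '{}-install'.format(wheel_path)
    os.makedirs(install_dir, exist_ok=True)
    WheelFile(wheel_path).install(force=True,
                                  overrides={
                                      'purelib': install_dir,
                                      'headers': os.path.join(install_dir, 'headers'),
                                      'scripts': os.path.join(install_dir, 'bin'),
                                      'platlib': install_dir,
                                      'data': install_dir,
                                  })
    # Activate any .pth files the wheel installed so its packages are importable.
    site.addsitedir(install_dir)
    return install_dir
```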
gh_patches_debug_30174
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6355
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uvicorn - Add to Project doesn't work
Confirmed on Windows and Linux
When adding a tap to the project I get a failure

```
2022-06-27T21:30:17.510848Z [error ] Exception on /api/v1/plugins/install/batch [POST]
Traceback (most recent call last):
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/meltano/api/security/auth.py", line 125, in decorated
return f(*args, **kwargs)
File "/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/meltano/api/controllers/plugins.py", line 151, in install_batch
related_plugins = add_service.add_related(plugin)
AttributeError: 'ProjectAddService' object has no attribute 'add_related'
```
</issue>
<code>
[start of src/meltano/api/controllers/plugins.py]
1 """API Plugin Management Blue Print."""
2
3 import asyncio
4 import logging
5
6 from flask import jsonify, request
7
8 from meltano.api.api_blueprint import APIBlueprint
9 from meltano.api.security.auth import block_if_readonly
10 from meltano.core.error import PluginInstallError
11 from meltano.core.plugin import PluginType
12 from meltano.core.plugin.project_plugin import ProjectPlugin
13 from meltano.core.plugin_discovery_service import (
14 PluginDiscoveryService,
15 PluginNotFoundError,
16 )
17 from meltano.core.plugin_install_service import (
18 PluginInstallReason,
19 PluginInstallService,
20 )
21 from meltano.core.project import Project
22 from meltano.core.project_add_service import ProjectAddService
23 from meltano.core.project_plugins_service import ProjectPluginsService
24
25
26 def plugin_def_json(plugin_def):
27 """Convert plugin defenition to json.
28
29 Args:
30 plugin_def: Plugin definition
31
32 Returns:
33 JSON of the plugin's definition
34 """
35 return {
36 "name": plugin_def.name,
37 "namespace": plugin_def.namespace,
38 "hidden": plugin_def.hidden,
39 "label": plugin_def.label,
40 "logo_url": plugin_def.logo_url,
41 "description": plugin_def.description,
42 "variants": [
43 {
44 "name": v.name, # noqa: WPS111
45 "default": i == 0, # noqa: WPS111
46 "deprecated": v.deprecated,
47 }
48 for i, v in enumerate(plugin_def.variants) # noqa: WPS111
49 ],
50 }
51
52
53 pluginsBP = APIBlueprint("plugins", __name__) # noqa: N816
54
55
56 @pluginsBP.errorhandler(PluginInstallError)
57 def _handle(ex):
58 return (jsonify({"error": True, "code": str(ex)}), 502)
59
60
61 @pluginsBP.route("/all", methods=["GET"]) # noqa: WPS125
62 def all():
63 """Plugins found by the PluginDiscoveryService.
64
65 Returns:
66 Json containing all the discovered plugins.
67 """
68 project = Project.find()
69 discovery = PluginDiscoveryService(project)
70
71 all_plugins = {
72 plugin_type: [plugin_def_json(plugin_def) for plugin_def in plugin_defs]
73 for plugin_type, plugin_defs in discovery.plugins_by_type().items()
74 }
75
76 return jsonify(all_plugins)
77
78
79 @pluginsBP.route("/installed", methods=["GET"])
80 def installed():
81 """All plugins installed in the project.
82
83 Returns:
84 Json of all installed plugins.
85 """
86 project = Project.find()
87 plugins_service = ProjectPluginsService(project)
88
89 def _plugin_json(plugin: ProjectPlugin):
90 plugin_json = {"name": plugin.name}
91
92 try:
93 plugin_json.update(plugin_def_json(plugin))
94
95 plugin_json["variant"] = plugin.variant
96 plugin_json["docs"] = plugin.docs
97 except PluginNotFoundError:
98 pass
99
100 return plugin_json
101
102 installed_plugins = {
103 plugin_type: [_plugin_json(plugin) for plugin in plugins]
104 for plugin_type, plugins in plugins_service.plugins_by_type().items()
105 }
106
107 return jsonify(installed_plugins)
108
109
110 @pluginsBP.route("/add", methods=["POST"])
111 @block_if_readonly
112 def add():
113 """Add Plugin the the project file.
114
115 Returns:
116 JSON of the plugin information added.
117 """
118 payload = request.get_json()
119 plugin_type = PluginType(payload["plugin_type"])
120 plugin_name = payload["name"]
121 variant = payload.get("variant", None)
122
123 project = Project.find()
124 add_service = ProjectAddService(project)
125 plugin = add_service.add(plugin_type, plugin_name, variant=variant)
126
127 return jsonify(plugin.canonical())
128
129
130 @pluginsBP.route("/install/batch", methods=["POST"])
131 @block_if_readonly
132 def install_batch(): # noqa: WPS210
133 """Install multiple plugins at once.
134
135 Raises:
136 PluginInstallError: Plugin insatllation error message.
137
138 Returns:
139 JSON cotaining all plugins installed.
140 """
141 payload = request.get_json()
142 project = Project.find()
143
144 plugins_service = ProjectPluginsService(project)
145 plugin = plugins_service.find_plugin(
146 payload["name"], plugin_type=PluginType(payload["plugin_type"])
147 )
148
149 add_service = ProjectAddService(project, plugins_service=plugins_service)
150 related_plugins = add_service.add_related(plugin)
151
152 # We will install the plugins in reverse order, since dependencies
153 # are listed after their dependents in `related_plugins`, but should
154 # be installed first.
155 related_plugins.reverse()
156
157 # This was added to assist api_worker threads
158 try:
159 asyncio.get_event_loop()
160 except RuntimeError:
161 logging.debug("/plugins/install/batch no asyncio event loop detected")
162 asyncio.set_event_loop(asyncio.new_event_loop())
163
164 install_service = PluginInstallService(project, plugins_service=plugins_service)
165 install_results = install_service.install_plugins(
166 related_plugins, reason=PluginInstallReason.ADD
167 )
168
169 for result in install_results:
170 if not result.successful:
171 raise PluginInstallError(result.message)
172
173 return jsonify([plugin.canonical() for plugin in related_plugins])
174
175
176 @pluginsBP.route("/install", methods=["POST"])
177 @block_if_readonly
178 def install():
179 """Install a plugin.
180
181 Returns:
182 JSON containing the plugin installed.
183 """
184 payload = request.get_json()
185 plugin_type = PluginType(payload["plugin_type"])
186 plugin_name = payload["name"]
187
188 project = Project.find()
189
190 plugins_service = ProjectPluginsService(project)
191 plugin = plugins_service.find_plugin(plugin_name, plugin_type=plugin_type)
192
193 # This was added to assist api_worker threads
194 try:
195 asyncio.get_event_loop()
196 except RuntimeError:
197 logging.debug("/plugins/install no asyncio event loop detected")
198 asyncio.set_event_loop(asyncio.new_event_loop())
199
200 install_service = PluginInstallService(project, plugins_service=plugins_service)
201 install_service.install_plugin(plugin, reason=PluginInstallReason.ADD)
202
203 return jsonify(plugin.canonical())
204
[end of src/meltano/api/controllers/plugins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/api/controllers/plugins.py b/src/meltano/api/controllers/plugins.py
--- a/src/meltano/api/controllers/plugins.py
+++ b/src/meltano/api/controllers/plugins.py
@@ -58,8 +58,8 @@
return (jsonify({"error": True, "code": str(ex)}), 502)
[email protected]("/all", methods=["GET"]) # noqa: WPS125
-def all():
[email protected]("/all", methods=["GET"])
+def all(): # noqa: WPS125
"""Plugins found by the PluginDiscoveryService.
Returns:
@@ -147,12 +147,7 @@
)
add_service = ProjectAddService(project, plugins_service=plugins_service)
- related_plugins = add_service.add_related(plugin)
-
- # We will install the plugins in reverse order, since dependencies
- # are listed after their dependents in `related_plugins`, but should
- # be installed first.
- related_plugins.reverse()
+ required_plugins = add_service.add_required(plugin)
# This was added to assist api_worker threads
try:
@@ -163,14 +158,14 @@
install_service = PluginInstallService(project, plugins_service=plugins_service)
install_results = install_service.install_plugins(
- related_plugins, reason=PluginInstallReason.ADD
+ required_plugins, reason=PluginInstallReason.ADD
)
for result in install_results:
if not result.successful:
raise PluginInstallError(result.message)
- return jsonify([plugin.canonical() for plugin in related_plugins])
+ return jsonify([plugin.canonical() for plugin in required_plugins])
@pluginsBP.route("/install", methods=["POST"])
|
{"golden_diff": "diff --git a/src/meltano/api/controllers/plugins.py b/src/meltano/api/controllers/plugins.py\n--- a/src/meltano/api/controllers/plugins.py\n+++ b/src/meltano/api/controllers/plugins.py\n@@ -58,8 +58,8 @@\n return (jsonify({\"error\": True, \"code\": str(ex)}), 502)\n \n \[email protected](\"/all\", methods=[\"GET\"]) # noqa: WPS125\n-def all():\[email protected](\"/all\", methods=[\"GET\"])\n+def all(): # noqa: WPS125\n \"\"\"Plugins found by the PluginDiscoveryService.\n \n Returns:\n@@ -147,12 +147,7 @@\n )\n \n add_service = ProjectAddService(project, plugins_service=plugins_service)\n- related_plugins = add_service.add_related(plugin)\n-\n- # We will install the plugins in reverse order, since dependencies\n- # are listed after their dependents in `related_plugins`, but should\n- # be installed first.\n- related_plugins.reverse()\n+ required_plugins = add_service.add_required(plugin)\n \n # This was added to assist api_worker threads\n try:\n@@ -163,14 +158,14 @@\n \n install_service = PluginInstallService(project, plugins_service=plugins_service)\n install_results = install_service.install_plugins(\n- related_plugins, reason=PluginInstallReason.ADD\n+ required_plugins, reason=PluginInstallReason.ADD\n )\n \n for result in install_results:\n if not result.successful:\n raise PluginInstallError(result.message)\n \n- return jsonify([plugin.canonical() for plugin in related_plugins])\n+ return jsonify([plugin.canonical() for plugin in required_plugins])\n \n \n @pluginsBP.route(\"/install\", methods=[\"POST\"])\n", "issue": "Uvicorn - Add to Project doesn't work\nConfirmed on Windows and Linux\r\n\r\nWhen adding a tap to the project I get a failure\r\n\r\n\r\n\r\n```\r\n2022-06-27T21:30:17.510848Z [error ] Exception on /api/v1/plugins/install/batch [POST]\r\nTraceback (most recent call last):\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask_restful/__init__.py\", line 271, in error_router\r\n return original_handler(e)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask_restful/__init__.py\", line 271, in error_router\r\n return original_handler(e)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/meltano/api/security/auth.py\", line 125, in decorated\r\n return f(*args, **kwargs)\r\n File \"/home/visch/.local/pipx/venvs/meltano/lib/python3.8/site-packages/meltano/api/controllers/plugins.py\", line 151, in install_batch\r\n related_plugins = add_service.add_related(plugin)\r\nAttributeError: 'ProjectAddService' object has no 
attribute 'add_related'\r\n```\n", "before_files": [{"content": "\"\"\"API Plugin Management Blue Print.\"\"\"\n\nimport asyncio\nimport logging\n\nfrom flask import jsonify, request\n\nfrom meltano.api.api_blueprint import APIBlueprint\nfrom meltano.api.security.auth import block_if_readonly\nfrom meltano.core.error import PluginInstallError\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.plugin_discovery_service import (\n PluginDiscoveryService,\n PluginNotFoundError,\n)\nfrom meltano.core.plugin_install_service import (\n PluginInstallReason,\n PluginInstallService,\n)\nfrom meltano.core.project import Project\nfrom meltano.core.project_add_service import ProjectAddService\nfrom meltano.core.project_plugins_service import ProjectPluginsService\n\n\ndef plugin_def_json(plugin_def):\n \"\"\"Convert plugin defenition to json.\n\n Args:\n plugin_def: Plugin definition\n\n Returns:\n JSON of the plugin's definition\n \"\"\"\n return {\n \"name\": plugin_def.name,\n \"namespace\": plugin_def.namespace,\n \"hidden\": plugin_def.hidden,\n \"label\": plugin_def.label,\n \"logo_url\": plugin_def.logo_url,\n \"description\": plugin_def.description,\n \"variants\": [\n {\n \"name\": v.name, # noqa: WPS111\n \"default\": i == 0, # noqa: WPS111\n \"deprecated\": v.deprecated,\n }\n for i, v in enumerate(plugin_def.variants) # noqa: WPS111\n ],\n }\n\n\npluginsBP = APIBlueprint(\"plugins\", __name__) # noqa: N816\n\n\[email protected](PluginInstallError)\ndef _handle(ex):\n return (jsonify({\"error\": True, \"code\": str(ex)}), 502)\n\n\[email protected](\"/all\", methods=[\"GET\"]) # noqa: WPS125\ndef all():\n \"\"\"Plugins found by the PluginDiscoveryService.\n\n Returns:\n Json containing all the discovered plugins.\n \"\"\"\n project = Project.find()\n discovery = PluginDiscoveryService(project)\n\n all_plugins = {\n plugin_type: [plugin_def_json(plugin_def) for plugin_def in plugin_defs]\n for plugin_type, plugin_defs in discovery.plugins_by_type().items()\n }\n\n return jsonify(all_plugins)\n\n\[email protected](\"/installed\", methods=[\"GET\"])\ndef installed():\n \"\"\"All plugins installed in the project.\n\n Returns:\n Json of all installed plugins.\n \"\"\"\n project = Project.find()\n plugins_service = ProjectPluginsService(project)\n\n def _plugin_json(plugin: ProjectPlugin):\n plugin_json = {\"name\": plugin.name}\n\n try:\n plugin_json.update(plugin_def_json(plugin))\n\n plugin_json[\"variant\"] = plugin.variant\n plugin_json[\"docs\"] = plugin.docs\n except PluginNotFoundError:\n pass\n\n return plugin_json\n\n installed_plugins = {\n plugin_type: [_plugin_json(plugin) for plugin in plugins]\n for plugin_type, plugins in plugins_service.plugins_by_type().items()\n }\n\n return jsonify(installed_plugins)\n\n\[email protected](\"/add\", methods=[\"POST\"])\n@block_if_readonly\ndef add():\n \"\"\"Add Plugin the the project file.\n\n Returns:\n JSON of the plugin information added.\n \"\"\"\n payload = request.get_json()\n plugin_type = PluginType(payload[\"plugin_type\"])\n plugin_name = payload[\"name\"]\n variant = payload.get(\"variant\", None)\n\n project = Project.find()\n add_service = ProjectAddService(project)\n plugin = add_service.add(plugin_type, plugin_name, variant=variant)\n\n return jsonify(plugin.canonical())\n\n\[email protected](\"/install/batch\", methods=[\"POST\"])\n@block_if_readonly\ndef install_batch(): # noqa: WPS210\n \"\"\"Install multiple plugins at once.\n\n Raises:\n 
PluginInstallError: Plugin insatllation error message.\n\n Returns:\n JSON cotaining all plugins installed.\n \"\"\"\n payload = request.get_json()\n project = Project.find()\n\n plugins_service = ProjectPluginsService(project)\n plugin = plugins_service.find_plugin(\n payload[\"name\"], plugin_type=PluginType(payload[\"plugin_type\"])\n )\n\n add_service = ProjectAddService(project, plugins_service=plugins_service)\n related_plugins = add_service.add_related(plugin)\n\n # We will install the plugins in reverse order, since dependencies\n # are listed after their dependents in `related_plugins`, but should\n # be installed first.\n related_plugins.reverse()\n\n # This was added to assist api_worker threads\n try:\n asyncio.get_event_loop()\n except RuntimeError:\n logging.debug(\"/plugins/install/batch no asyncio event loop detected\")\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n install_service = PluginInstallService(project, plugins_service=plugins_service)\n install_results = install_service.install_plugins(\n related_plugins, reason=PluginInstallReason.ADD\n )\n\n for result in install_results:\n if not result.successful:\n raise PluginInstallError(result.message)\n\n return jsonify([plugin.canonical() for plugin in related_plugins])\n\n\[email protected](\"/install\", methods=[\"POST\"])\n@block_if_readonly\ndef install():\n \"\"\"Install a plugin.\n\n Returns:\n JSON containing the plugin installed.\n \"\"\"\n payload = request.get_json()\n plugin_type = PluginType(payload[\"plugin_type\"])\n plugin_name = payload[\"name\"]\n\n project = Project.find()\n\n plugins_service = ProjectPluginsService(project)\n plugin = plugins_service.find_plugin(plugin_name, plugin_type=plugin_type)\n\n # This was added to assist api_worker threads\n try:\n asyncio.get_event_loop()\n except RuntimeError:\n logging.debug(\"/plugins/install no asyncio event loop detected\")\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n install_service = PluginInstallService(project, plugins_service=plugins_service)\n install_service.install_plugin(plugin, reason=PluginInstallReason.ADD)\n\n return jsonify(plugin.canonical())\n", "path": "src/meltano/api/controllers/plugins.py"}]}
| 3,000 | 393 |
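The golden diff in the Meltano record above swaps the missing `add_related` call for `add_required` and drops the manual `reverse()`; in both versions the intent is that whatever a plugin needs is installed together with, and no later than, the plugin itself. The sketch below shows that ordering idea on plain dicts; the dict shape, the `install_one` callable, and the error class are assumptions for illustration, not Meltano's actual `ProjectAddService` or `PluginInstallService` API.

```
class PluginInstallError(Exception):
    pass


def collect_with_required(plugin, seen=None):
    """Return the plugin plus everything it requires, requirements first."""
    seen = set() if seen is None else seen
    ordered = []
    for dep in plugin.get("requires", []):
        if dep["name"] not in seen:
            seen.add(dep["name"])
            ordered.extend(collect_with_required(dep, seen))
    ordered.append(plugin)
    return ordered


def install_batch(plugin, install_one):
    """Install a plugin and its requirements, failing loudly on the first error."""
    installed = []
    for item in collect_with_required(plugin):
        ok, message = install_one(item)
        if not ok:
            raise PluginInstallError(message)
        installed.append(item["name"])
    return installed


if __name__ == "__main__":
    tap = {
        "name": "tap-example",
        "requires": [{"name": "loader-example"}, {"name": "utility-example"}],
    }
    print(install_batch(tap, lambda p: (True, "installed " + p["name"])))
```

Collecting requirements depth-first before appending the plugin keeps the install order deterministic without a separate reversal step.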
gh_patches_debug_5579
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-6373
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Outfile -w cannot be loaded
#### Problem Description
When the dump file gets bigger, around 100 MB, it is not loaded anymore.
#### Steps to reproduce the behavior:
Make a big outfile and try to open it with a new instance of mitmweb.
#### System Information
Mitmweb Windows 10 6.0.2
mitmweb Not loading my saved flow
So I recorded some actions with mitmweb and saved the flow.
Then I closed mitmweb, and reopened it. Then I went to open the saved flow file (which is 100 megabytes). But when I open it, the requests and responses do not appear?
</issue>
<code>
[start of mitmproxy/tools/web/master.py]
1 import errno
2 import logging
3
4 import tornado.httpserver
5 import tornado.ioloop
6
7 from mitmproxy import addons
8 from mitmproxy import flow
9 from mitmproxy import log
10 from mitmproxy import master
11 from mitmproxy import options
12 from mitmproxy import optmanager
13 from mitmproxy.addons import errorcheck
14 from mitmproxy.addons import eventstore
15 from mitmproxy.addons import intercept
16 from mitmproxy.addons import readfile
17 from mitmproxy.addons import termlog
18 from mitmproxy.addons import view
19 from mitmproxy.addons.proxyserver import Proxyserver
20 from mitmproxy.tools.web import app
21 from mitmproxy.tools.web import static_viewer
22 from mitmproxy.tools.web import webaddons
23
24 logger = logging.getLogger(__name__)
25
26
27 class WebMaster(master.Master):
28 def __init__(self, opts: options.Options, with_termlog: bool = True):
29 super().__init__(opts)
30 self.view = view.View()
31 self.view.sig_view_add.connect(self._sig_view_add)
32 self.view.sig_view_remove.connect(self._sig_view_remove)
33 self.view.sig_view_update.connect(self._sig_view_update)
34 self.view.sig_view_refresh.connect(self._sig_view_refresh)
35
36 self.events = eventstore.EventStore()
37 self.events.sig_add.connect(self._sig_events_add)
38 self.events.sig_refresh.connect(self._sig_events_refresh)
39
40 self.options.changed.connect(self._sig_options_update)
41
42 if with_termlog:
43 self.addons.add(termlog.TermLog())
44 self.addons.add(*addons.default_addons())
45 self.addons.add(
46 webaddons.WebAddon(),
47 intercept.Intercept(),
48 readfile.ReadFile(),
49 static_viewer.StaticViewer(),
50 self.view,
51 self.events,
52 errorcheck.ErrorCheck(),
53 )
54 self.app = app.Application(self, self.options.web_debug)
55 self.proxyserver: Proxyserver = self.addons.get("proxyserver")
56 self.proxyserver.servers.changed.connect(self._sig_servers_changed)
57
58 def _sig_view_add(self, flow: flow.Flow) -> None:
59 app.ClientConnection.broadcast(
60 resource="flows", cmd="add", data=app.flow_to_json(flow)
61 )
62
63 def _sig_view_update(self, flow: flow.Flow) -> None:
64 app.ClientConnection.broadcast(
65 resource="flows", cmd="update", data=app.flow_to_json(flow)
66 )
67
68 def _sig_view_remove(self, flow: flow.Flow, index: int) -> None:
69 app.ClientConnection.broadcast(resource="flows", cmd="remove", data=flow.id)
70
71 def _sig_view_refresh(self) -> None:
72 app.ClientConnection.broadcast(resource="flows", cmd="reset")
73
74 def _sig_events_add(self, entry: log.LogEntry) -> None:
75 app.ClientConnection.broadcast(
76 resource="events", cmd="add", data=app.logentry_to_json(entry)
77 )
78
79 def _sig_events_refresh(self) -> None:
80 app.ClientConnection.broadcast(resource="events", cmd="reset")
81
82 def _sig_options_update(self, updated: set[str]) -> None:
83 options_dict = optmanager.dump_dicts(self.options, updated)
84 app.ClientConnection.broadcast(
85 resource="options", cmd="update", data=options_dict
86 )
87
88 def _sig_servers_changed(self) -> None:
89 app.ClientConnection.broadcast(
90 resource="state",
91 cmd="update",
92 data={"servers": [s.to_json() for s in self.proxyserver.servers]},
93 )
94
95 async def running(self):
96 # Register tornado with the current event loop
97 tornado.ioloop.IOLoop.current()
98
99 # Add our web app.
100 http_server = tornado.httpserver.HTTPServer(self.app)
101 try:
102 http_server.listen(self.options.web_port, self.options.web_host)
103 except OSError as e:
104 message = f"Web server failed to listen on {self.options.web_host or '*'}:{self.options.web_port} with {e}"
105 if e.errno == errno.EADDRINUSE:
106 message += f"\nTry specifying a different port by using `--set web_port={self.options.web_port + 2}`."
107 raise OSError(e.errno, message, e.filename) from e
108
109 logger.info(
110 f"Web server listening at http://{self.options.web_host}:{self.options.web_port}/",
111 )
112
113 return await super().running()
114
[end of mitmproxy/tools/web/master.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/tools/web/master.py b/mitmproxy/tools/web/master.py
--- a/mitmproxy/tools/web/master.py
+++ b/mitmproxy/tools/web/master.py
@@ -97,7 +97,9 @@
tornado.ioloop.IOLoop.current()
# Add our web app.
- http_server = tornado.httpserver.HTTPServer(self.app)
+ http_server = tornado.httpserver.HTTPServer(
+ self.app, max_buffer_size=2**32
+ ) # 4GB
try:
http_server.listen(self.options.web_port, self.options.web_host)
except OSError as e:
|
{"golden_diff": "diff --git a/mitmproxy/tools/web/master.py b/mitmproxy/tools/web/master.py\n--- a/mitmproxy/tools/web/master.py\n+++ b/mitmproxy/tools/web/master.py\n@@ -97,7 +97,9 @@\n tornado.ioloop.IOLoop.current()\n \n # Add our web app.\n- http_server = tornado.httpserver.HTTPServer(self.app)\n+ http_server = tornado.httpserver.HTTPServer(\n+ self.app, max_buffer_size=2**32\n+ ) # 4GB\n try:\n http_server.listen(self.options.web_port, self.options.web_host)\n except OSError as e:\n", "issue": "Outfile -w cannot be loaded\n#### Problem Description\nWhen the dump file is getting bigger, about 100mb it's not loaded anymore.\n\n#### Steps to reproduce the behavior:\nMake a big outfile and try to open it with an new instance of mitmweb.\n\n#### System Information\nMitmweb Windows 10 6.0.2\nmitmweb Not loading my saved flow\nSo I recorded some actions with mitmweb and saved the flow.\r\n\r\nThen I closed mitmweb, and reopened it. Then I went to open the saved flow file (which is 100 megabytes). But when I open it, the requests and responses do not appear?\r\n\n", "before_files": [{"content": "import errno\nimport logging\n\nimport tornado.httpserver\nimport tornado.ioloop\n\nfrom mitmproxy import addons\nfrom mitmproxy import flow\nfrom mitmproxy import log\nfrom mitmproxy import master\nfrom mitmproxy import options\nfrom mitmproxy import optmanager\nfrom mitmproxy.addons import errorcheck\nfrom mitmproxy.addons import eventstore\nfrom mitmproxy.addons import intercept\nfrom mitmproxy.addons import readfile\nfrom mitmproxy.addons import termlog\nfrom mitmproxy.addons import view\nfrom mitmproxy.addons.proxyserver import Proxyserver\nfrom mitmproxy.tools.web import app\nfrom mitmproxy.tools.web import static_viewer\nfrom mitmproxy.tools.web import webaddons\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebMaster(master.Master):\n def __init__(self, opts: options.Options, with_termlog: bool = True):\n super().__init__(opts)\n self.view = view.View()\n self.view.sig_view_add.connect(self._sig_view_add)\n self.view.sig_view_remove.connect(self._sig_view_remove)\n self.view.sig_view_update.connect(self._sig_view_update)\n self.view.sig_view_refresh.connect(self._sig_view_refresh)\n\n self.events = eventstore.EventStore()\n self.events.sig_add.connect(self._sig_events_add)\n self.events.sig_refresh.connect(self._sig_events_refresh)\n\n self.options.changed.connect(self._sig_options_update)\n\n if with_termlog:\n self.addons.add(termlog.TermLog())\n self.addons.add(*addons.default_addons())\n self.addons.add(\n webaddons.WebAddon(),\n intercept.Intercept(),\n readfile.ReadFile(),\n static_viewer.StaticViewer(),\n self.view,\n self.events,\n errorcheck.ErrorCheck(),\n )\n self.app = app.Application(self, self.options.web_debug)\n self.proxyserver: Proxyserver = self.addons.get(\"proxyserver\")\n self.proxyserver.servers.changed.connect(self._sig_servers_changed)\n\n def _sig_view_add(self, flow: flow.Flow) -> None:\n app.ClientConnection.broadcast(\n resource=\"flows\", cmd=\"add\", data=app.flow_to_json(flow)\n )\n\n def _sig_view_update(self, flow: flow.Flow) -> None:\n app.ClientConnection.broadcast(\n resource=\"flows\", cmd=\"update\", data=app.flow_to_json(flow)\n )\n\n def _sig_view_remove(self, flow: flow.Flow, index: int) -> None:\n app.ClientConnection.broadcast(resource=\"flows\", cmd=\"remove\", data=flow.id)\n\n def _sig_view_refresh(self) -> None:\n app.ClientConnection.broadcast(resource=\"flows\", cmd=\"reset\")\n\n def _sig_events_add(self, entry: log.LogEntry) -> None:\n 
app.ClientConnection.broadcast(\n resource=\"events\", cmd=\"add\", data=app.logentry_to_json(entry)\n )\n\n def _sig_events_refresh(self) -> None:\n app.ClientConnection.broadcast(resource=\"events\", cmd=\"reset\")\n\n def _sig_options_update(self, updated: set[str]) -> None:\n options_dict = optmanager.dump_dicts(self.options, updated)\n app.ClientConnection.broadcast(\n resource=\"options\", cmd=\"update\", data=options_dict\n )\n\n def _sig_servers_changed(self) -> None:\n app.ClientConnection.broadcast(\n resource=\"state\",\n cmd=\"update\",\n data={\"servers\": [s.to_json() for s in self.proxyserver.servers]},\n )\n\n async def running(self):\n # Register tornado with the current event loop\n tornado.ioloop.IOLoop.current()\n\n # Add our web app.\n http_server = tornado.httpserver.HTTPServer(self.app)\n try:\n http_server.listen(self.options.web_port, self.options.web_host)\n except OSError as e:\n message = f\"Web server failed to listen on {self.options.web_host or '*'}:{self.options.web_port} with {e}\"\n if e.errno == errno.EADDRINUSE:\n message += f\"\\nTry specifying a different port by using `--set web_port={self.options.web_port + 2}`.\"\n raise OSError(e.errno, message, e.filename) from e\n\n logger.info(\n f\"Web server listening at http://{self.options.web_host}:{self.options.web_port}/\",\n )\n\n return await super().running()\n", "path": "mitmproxy/tools/web/master.py"}]}
| 1,847 | 140 |
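The mitmproxy fix above is a single keyword argument: Tornado's `HTTPServer` buffers the whole request body, and its default ceiling of roughly 100 MB lines up with the 100 MB flow dumps in the report. The standalone sketch below shows the same knob on a bare Tornado app, assuming Tornado is installed; the handler, route, and port are invented for the example, and only the `max_buffer_size` value mirrors the patch.

```
import tornado.httpserver
import tornado.ioloop
import tornado.web


class UploadHandler(tornado.web.RequestHandler):
    def post(self):
        # request.body only arrives here if it fit within max_buffer_size.
        self.write({"received_bytes": len(self.request.body)})


def main():
    app = tornado.web.Application([(r"/upload", UploadHandler)])
    # The default buffer is about 100 MB; large uploads need a bigger ceiling.
    server = tornado.httpserver.HTTPServer(app, max_buffer_size=2**32)  # 4 GB
    server.listen(8081)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
```

POSTing a file larger than 100 MB to `/upload` succeeds with the raised limit and is rejected without it, which is the behaviour the bug report describes.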
gh_patches_debug_28108
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-9465
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bandcamp] crashes on paid tracks
The latest git version '22d7368dfb384e7698faad6d2891b4aaceab3d7c' crashes when attempting to download
http://music.bucketheadpikes.com/track/crumple-part-two
This track is not playable in the browser, so it would be better to display an error message.
> bash-3.2$ ./youtube-dl -v http://music.bucketheadpikes.com/track/crumple-part-two
> [debug] System config: []
> [debug] User config: []
> [debug] Command-line args: [u'-v', u'http://music.bucketheadpikes.com/track/crumple-part-two']
> [debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
> [debug] youtube-dl version 2015.11.27.1
> [debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit
> [debug] exe versions: avconv v10_beta1-440-gb33c64e, avprobe v10_beta1-440-gb33c64e, ffmpeg 2.8.2, ffprobe 2.8.2
> [debug] Proxy map: {}
> [generic] crumple-part-two: Requesting header
> WARNING: Falling back on generic information extractor.
> [generic] crumple-part-two: Downloading webpage
> [generic] crumple-part-two: Extracting information
> [Bandcamp] crumple-part-two: Downloading webpage
> Traceback (most recent call last):
> File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
> "__main__", fname, loader, pkg_name)
> File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
> exec code in run_globals
> File "./youtube-dl/__main__.py", line 19, in <module>
> File "./youtube-dl/youtube_dl/**init**.py", line 410, in main
> File "./youtube-dl/youtube_dl/**init**.py", line 400, in _real_main
> File "./youtube-dl/youtube_dl/YoutubeDL.py", line 1669, in download
> File "./youtube-dl/youtube_dl/YoutubeDL.py", line 674, in extract_info
> File "./youtube-dl/youtube_dl/YoutubeDL.py", line 727, in process_ie_result
> File "./youtube-dl/youtube_dl/YoutubeDL.py", line 663, in extract_info
> File "./youtube-dl/youtube_dl/extractor/common.py", line 290, in extract
> File "./youtube-dl/youtube_dl/extractor/bandcamp.py", line 53, in _real_extract
> AttributeError: 'NoneType' object has no attribute 'items'
</issue>
<code>
[start of youtube_dl/extractor/bandcamp.py]
1 from __future__ import unicode_literals
2
3 import json
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import (
8 compat_str,
9 compat_urlparse,
10 )
11 from ..utils import (
12 ExtractorError,
13 float_or_none,
14 int_or_none,
15 )
16
17
18 class BandcampIE(InfoExtractor):
19 _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
20 _TESTS = [{
21 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
22 'md5': 'c557841d5e50261777a6585648adf439',
23 'info_dict': {
24 'id': '1812978515',
25 'ext': 'mp3',
26 'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
27 'duration': 9.8485,
28 },
29 '_skip': 'There is a limit of 200 free downloads / month for the test song'
30 }, {
31 'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
32 'md5': '2b68e5851514c20efdff2afc5603b8b4',
33 'info_dict': {
34 'id': '2650410135',
35 'ext': 'mp3',
36 'title': 'Lanius (Battle)',
37 'uploader': 'Ben Prunty Music',
38 },
39 }]
40
41 def _real_extract(self, url):
42 mobj = re.match(self._VALID_URL, url)
43 title = mobj.group('title')
44 webpage = self._download_webpage(url, title)
45 m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
46 if not m_download:
47 m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
48 if m_trackinfo:
49 json_code = m_trackinfo.group(1)
50 data = json.loads(json_code)[0]
51
52 formats = []
53 for format_id, format_url in data['file'].items():
54 ext, abr_str = format_id.split('-', 1)
55 formats.append({
56 'format_id': format_id,
57 'url': self._proto_relative_url(format_url, 'http:'),
58 'ext': ext,
59 'vcodec': 'none',
60 'acodec': ext,
61 'abr': int_or_none(abr_str),
62 })
63
64 self._sort_formats(formats)
65
66 return {
67 'id': compat_str(data['id']),
68 'title': data['title'],
69 'formats': formats,
70 'duration': float_or_none(data.get('duration')),
71 }
72 else:
73 raise ExtractorError('No free songs found')
74
75 download_link = m_download.group(1)
76 video_id = self._search_regex(
77 r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
78 webpage, 'video id')
79
80 download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
81 # We get the dictionary of the track from some javascript code
82 all_info = self._parse_json(self._search_regex(
83 r'(?sm)items: (.*?),$', download_webpage, 'items'), video_id)
84 info = all_info[0]
85 # We pick mp3-320 for now, until format selection can be easily implemented.
86 mp3_info = info['downloads']['mp3-320']
87 # If we try to use this url it says the link has expired
88 initial_url = mp3_info['url']
89 m_url = re.match(
90 r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$',
91 initial_url)
92 # We build the url we will use to get the final track url
93 # This url is build in Bandcamp in the script download_bunde_*.js
94 request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
95 final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
96 # If we could correctly generate the .rand field the url would be
97 # in the "download_url" key
98 final_url = self._proto_relative_url(self._search_regex(
99 r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:')
100
101 return {
102 'id': video_id,
103 'title': info['title'],
104 'ext': 'mp3',
105 'vcodec': 'none',
106 'url': final_url,
107 'thumbnail': info.get('thumb_url'),
108 'uploader': info.get('artist'),
109 }
110
111
112 class BandcampAlbumIE(InfoExtractor):
113 IE_NAME = 'Bandcamp:album'
114 _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^?#]+)|/?(?:$|[?#]))'
115
116 _TESTS = [{
117 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
118 'playlist': [
119 {
120 'md5': '39bc1eded3476e927c724321ddf116cf',
121 'info_dict': {
122 'id': '1353101989',
123 'ext': 'mp3',
124 'title': 'Intro',
125 }
126 },
127 {
128 'md5': '1a2c32e2691474643e912cc6cd4bffaa',
129 'info_dict': {
130 'id': '38097443',
131 'ext': 'mp3',
132 'title': 'Kero One - Keep It Alive (Blazo remix)',
133 }
134 },
135 ],
136 'info_dict': {
137 'title': 'Jazz Format Mixtape vol.1',
138 'id': 'jazz-format-mixtape-vol-1',
139 'uploader_id': 'blazo',
140 },
141 'params': {
142 'playlistend': 2
143 },
144 'skip': 'Bandcamp imposes download limits.'
145 }, {
146 'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
147 'info_dict': {
148 'title': 'Hierophany of the Open Grave',
149 'uploader_id': 'nightbringer',
150 'id': 'hierophany-of-the-open-grave',
151 },
152 'playlist_mincount': 9,
153 }, {
154 'url': 'http://dotscale.bandcamp.com',
155 'info_dict': {
156 'title': 'Loom',
157 'id': 'dotscale',
158 'uploader_id': 'dotscale',
159 },
160 'playlist_mincount': 7,
161 }]
162
163 def _real_extract(self, url):
164 mobj = re.match(self._VALID_URL, url)
165 uploader_id = mobj.group('subdomain')
166 album_id = mobj.group('album_id')
167 playlist_id = album_id or uploader_id
168 webpage = self._download_webpage(url, playlist_id)
169 tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
170 if not tracks_paths:
171 raise ExtractorError('The page doesn\'t contain any tracks')
172 entries = [
173 self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
174 for t_path in tracks_paths]
175 title = self._search_regex(
176 r'album_title\s*:\s*"(.*?)"', webpage, 'title', fatal=False)
177 return {
178 '_type': 'playlist',
179 'uploader_id': uploader_id,
180 'id': playlist_id,
181 'title': title,
182 'entries': entries,
183 }
184
[end of youtube_dl/extractor/bandcamp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -29,7 +29,7 @@
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
- 'md5': '2b68e5851514c20efdff2afc5603b8b4',
+ 'md5': '73d0b3171568232574e45652f8720b5c',
'info_dict': {
'id': '2650410135',
'ext': 'mp3',
@@ -48,6 +48,10 @@
if m_trackinfo:
json_code = m_trackinfo.group(1)
data = json.loads(json_code)[0]
+ track_id = compat_str(data['id'])
+
+ if not data.get('file'):
+ raise ExtractorError('Not streamable', video_id=track_id, expected=True)
formats = []
for format_id, format_url in data['file'].items():
@@ -64,7 +68,7 @@
self._sort_formats(formats)
return {
- 'id': compat_str(data['id']),
+ 'id': track_id,
'title': data['title'],
'formats': formats,
'duration': float_or_none(data.get('duration')),
|
{"golden_diff": "diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py\n--- a/youtube_dl/extractor/bandcamp.py\n+++ b/youtube_dl/extractor/bandcamp.py\n@@ -29,7 +29,7 @@\n '_skip': 'There is a limit of 200 free downloads / month for the test song'\n }, {\n 'url': 'http://benprunty.bandcamp.com/track/lanius-battle',\n- 'md5': '2b68e5851514c20efdff2afc5603b8b4',\n+ 'md5': '73d0b3171568232574e45652f8720b5c',\n 'info_dict': {\n 'id': '2650410135',\n 'ext': 'mp3',\n@@ -48,6 +48,10 @@\n if m_trackinfo:\n json_code = m_trackinfo.group(1)\n data = json.loads(json_code)[0]\n+ track_id = compat_str(data['id'])\n+\n+ if not data.get('file'):\n+ raise ExtractorError('Not streamable', video_id=track_id, expected=True)\n \n formats = []\n for format_id, format_url in data['file'].items():\n@@ -64,7 +68,7 @@\n self._sort_formats(formats)\n \n return {\n- 'id': compat_str(data['id']),\n+ 'id': track_id,\n 'title': data['title'],\n 'formats': formats,\n 'duration': float_or_none(data.get('duration')),\n", "issue": "[bandcamp] crashes on paid tracks\nLatest git-version '22d7368dfb384e7698faad6d2891b4aaceab3d7c' crashes on attempt to download\nhttp://music.bucketheadpikes.com/track/crumple-part-two\nThis track is not playable in browser. Thus it could be better to display any error text.\n\n> bash-3.2$ ./youtube-dl -v http://music.bucketheadpikes.com/track/crumple-part-two\n> [debug] System config: []\n> [debug] User config: []\n> [debug] Command-line args: [u'-v', u'http://music.bucketheadpikes.com/track/crumple-part-two']\n> [debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8\n> [debug] youtube-dl version 2015.11.27.1\n> [debug] Python version 2.7.10 - Darwin-14.5.0-x86_64-i386-64bit\n> [debug] exe versions: avconv v10_beta1-440-gb33c64e, avprobe v10_beta1-440-gb33c64e, ffmpeg 2.8.2, ffprobe 2.8.2\n> [debug] Proxy map: {}\n> [generic] crumple-part-two: Requesting header\n> WARNING: Falling back on generic information extractor.\n> [generic] crumple-part-two: Downloading webpage\n> [generic] crumple-part-two: Extracting information\n> [Bandcamp] crumple-part-two: Downloading webpage\n> Traceback (most recent call last):\n> File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 162, in _run_module_as_main\n> \"__main__\", fname, loader, pkg_name)\n> File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 72, in _run_code\n> exec code in run_globals\n> File \"./youtube-dl/__main__.py\", line 19, in <module>\n> File \"./youtube-dl/youtube_dl/**init**.py\", line 410, in main\n> File \"./youtube-dl/youtube_dl/**init**.py\", line 400, in _real_main\n> File \"./youtube-dl/youtube_dl/YoutubeDL.py\", line 1669, in download\n> File \"./youtube-dl/youtube_dl/YoutubeDL.py\", line 674, in extract_info\n> File \"./youtube-dl/youtube_dl/YoutubeDL.py\", line 727, in process_ie_result\n> File \"./youtube-dl/youtube_dl/YoutubeDL.py\", line 663, in extract_info\n> File \"./youtube-dl/youtube_dl/extractor/common.py\", line 290, in extract\n> File \"./youtube-dl/youtube_dl/extractor/bandcamp.py\", line 53, in _real_extract\n> AttributeError: 'NoneType' object has no attribute 'items'\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_str,\n compat_urlparse,\n)\nfrom ..utils import (\n ExtractorError,\n float_or_none,\n int_or_none,\n)\n\n\nclass BandcampIE(InfoExtractor):\n _VALID_URL = 
r'https?://.*?\\.bandcamp\\.com/track/(?P<title>.*)'\n _TESTS = [{\n 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',\n 'md5': 'c557841d5e50261777a6585648adf439',\n 'info_dict': {\n 'id': '1812978515',\n 'ext': 'mp3',\n 'title': \"youtube-dl \\\"'/\\\\\\u00e4\\u21ad - youtube-dl test song \\\"'/\\\\\\u00e4\\u21ad\",\n 'duration': 9.8485,\n },\n '_skip': 'There is a limit of 200 free downloads / month for the test song'\n }, {\n 'url': 'http://benprunty.bandcamp.com/track/lanius-battle',\n 'md5': '2b68e5851514c20efdff2afc5603b8b4',\n 'info_dict': {\n 'id': '2650410135',\n 'ext': 'mp3',\n 'title': 'Lanius (Battle)',\n 'uploader': 'Ben Prunty Music',\n },\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n title = mobj.group('title')\n webpage = self._download_webpage(url, title)\n m_download = re.search(r'freeDownloadPage: \"(.*?)\"', webpage)\n if not m_download:\n m_trackinfo = re.search(r'trackinfo: (.+),\\s*?\\n', webpage)\n if m_trackinfo:\n json_code = m_trackinfo.group(1)\n data = json.loads(json_code)[0]\n\n formats = []\n for format_id, format_url in data['file'].items():\n ext, abr_str = format_id.split('-', 1)\n formats.append({\n 'format_id': format_id,\n 'url': self._proto_relative_url(format_url, 'http:'),\n 'ext': ext,\n 'vcodec': 'none',\n 'acodec': ext,\n 'abr': int_or_none(abr_str),\n })\n\n self._sort_formats(formats)\n\n return {\n 'id': compat_str(data['id']),\n 'title': data['title'],\n 'formats': formats,\n 'duration': float_or_none(data.get('duration')),\n }\n else:\n raise ExtractorError('No free songs found')\n\n download_link = m_download.group(1)\n video_id = self._search_regex(\n r'(?ms)var TralbumData = .*?[{,]\\s*id: (?P<id>\\d+),?$',\n webpage, 'video id')\n\n download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')\n # We get the dictionary of the track from some javascript code\n all_info = self._parse_json(self._search_regex(\n r'(?sm)items: (.*?),$', download_webpage, 'items'), video_id)\n info = all_info[0]\n # We pick mp3-320 for now, until format selection can be easily implemented.\n mp3_info = info['downloads']['mp3-320']\n # If we try to use this url it says the link has expired\n initial_url = mp3_info['url']\n m_url = re.match(\n r'(?P<server>http://(.*?)\\.bandcamp\\.com)/download/track\\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$',\n initial_url)\n # We build the url we will use to get the final track url\n # This url is build in Bandcamp in the script download_bunde_*.js\n request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))\n final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')\n # If we could correctly generate the .rand field the url would be\n # in the \"download_url\" key\n final_url = self._proto_relative_url(self._search_regex(\n r'\"retry_url\":\"(.+?)\"', final_url_webpage, 'final video URL'), 'http:')\n\n return {\n 'id': video_id,\n 'title': info['title'],\n 'ext': 'mp3',\n 'vcodec': 'none',\n 'url': final_url,\n 'thumbnail': info.get('thumb_url'),\n 'uploader': info.get('artist'),\n }\n\n\nclass BandcampAlbumIE(InfoExtractor):\n IE_NAME = 'Bandcamp:album'\n _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\\.)?bandcamp\\.com(?:/album/(?P<album_id>[^?#]+)|/?(?:$|[?#]))'\n\n _TESTS = [{\n 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',\n 'playlist': [\n 
{\n 'md5': '39bc1eded3476e927c724321ddf116cf',\n 'info_dict': {\n 'id': '1353101989',\n 'ext': 'mp3',\n 'title': 'Intro',\n }\n },\n {\n 'md5': '1a2c32e2691474643e912cc6cd4bffaa',\n 'info_dict': {\n 'id': '38097443',\n 'ext': 'mp3',\n 'title': 'Kero One - Keep It Alive (Blazo remix)',\n }\n },\n ],\n 'info_dict': {\n 'title': 'Jazz Format Mixtape vol.1',\n 'id': 'jazz-format-mixtape-vol-1',\n 'uploader_id': 'blazo',\n },\n 'params': {\n 'playlistend': 2\n },\n 'skip': 'Bandcamp imposes download limits.'\n }, {\n 'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',\n 'info_dict': {\n 'title': 'Hierophany of the Open Grave',\n 'uploader_id': 'nightbringer',\n 'id': 'hierophany-of-the-open-grave',\n },\n 'playlist_mincount': 9,\n }, {\n 'url': 'http://dotscale.bandcamp.com',\n 'info_dict': {\n 'title': 'Loom',\n 'id': 'dotscale',\n 'uploader_id': 'dotscale',\n },\n 'playlist_mincount': 7,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n uploader_id = mobj.group('subdomain')\n album_id = mobj.group('album_id')\n playlist_id = album_id or uploader_id\n webpage = self._download_webpage(url, playlist_id)\n tracks_paths = re.findall(r'<a href=\"(.*?)\" itemprop=\"url\">', webpage)\n if not tracks_paths:\n raise ExtractorError('The page doesn\\'t contain any tracks')\n entries = [\n self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())\n for t_path in tracks_paths]\n title = self._search_regex(\n r'album_title\\s*:\\s*\"(.*?)\"', webpage, 'title', fatal=False)\n return {\n '_type': 'playlist',\n 'uploader_id': uploader_id,\n 'id': playlist_id,\n 'title': title,\n 'entries': entries,\n }\n", "path": "youtube_dl/extractor/bandcamp.py"}]}
| 3,617 | 386 |
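The Bandcamp fix above is a guard: if the trackinfo JSON carries no `file` mapping (paid-only tracks ship `"file": null`), raise an expected extractor error instead of calling `.items()` on `None`. The sketch below applies the same guard to plain dicts; `build_formats` and `NotStreamableError` are illustrative names, not youtube-dl's real extractor machinery.

```
class NotStreamableError(Exception):
    """Stands in for youtube-dl's ExtractorError in this sketch."""


def build_formats(track):
    """Turn Bandcamp-style trackinfo into format dicts, refusing unstreamable tracks."""
    track_id = str(track["id"])
    files = track.get("file")
    if not files:
        # Paid-only tracks carry "file": null, which is what blew up with
        # "'NoneType' object has no attribute 'items'" in the issue above.
        raise NotStreamableError("track {} is not streamable".format(track_id))

    formats = []
    for format_id, url in files.items():
        ext, _, abr = format_id.partition("-")
        formats.append({
            "format_id": format_id,
            "url": url,
            "ext": ext,
            "abr": int(abr) if abr.isdigit() else None,
        })
    return {"id": track_id, "title": track["title"], "formats": formats}


if __name__ == "__main__":
    free = {"id": 1, "title": "demo", "file": {"mp3-128": "http://example.com/a.mp3"}}
    paid = {"id": 2, "title": "crumple part two", "file": None}
    print(build_formats(free))
    try:
        build_formats(paid)
    except NotStreamableError as err:
        print("expected failure:", err)
```

In the actual patch the raised error is `ExtractorError(..., expected=True)`, which youtube-dl reports as a clean message rather than a traceback, addressing the reporter's request for readable error text.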
gh_patches_debug_54195
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-1275
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
State leakage across test runs when using parallelization
### What is wrong.
The tests at `tests/examples/safe_remote_purchase/test_safe_remote_purchase.py` fail when run using `pytest-xdist` to parallelize test runs.
```
def test_abort(w3, assert_tx_failed, check_balance, get_contract, contract_code):
a0, a1, a2 = w3.eth.accounts[:3]
c = get_contract(contract_code, value=2)
# Only sender can trigger refund
assert_tx_failed(lambda: c.abort(transact={'from': a2}))
# Refund works correctly
c.abort(transact={'from': a0, 'gasPrice': 0})
> assert check_balance() == (INIT_BAL_a0 - w3.toWei(2, 'ether'), INIT_BAL_a1)
E assert (100000000000...0000000000000) == (9999980000000...0000000000000)
E At index 0 diff: 1000000000000000000000000 != 999998000000000000000000
E Use -v to get the full diff
tests/examples/safe_remote_purchase/test_safe_remote_purchase.py:62: AssertionError
```
Replicate by installing `pytest-xdist` and running with:
```
pytest tests/examples/safe_remote_purchase/test_safe_remote_purchase.py -n 2
```
It's likely this isn't deterministic and you may need to run the full suite.
### How can it be fixed.
Figure out where statefulness is leaking across test runs and fix it.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 test_deps = [
7 'pytest',
8 'pytest-cov',
9 'py-evm==0.2.0a34',
10 'eth-tester==0.1.0b33',
11 'web3==4.8.2',
12 ]
13
14
15 extras = {
16 'test': test_deps
17 }
18
19
20 setup(
21 name='vyper',
22 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
23 version='0.1.0-beta.8',
24 description='Vyper Programming Language for Ethereum',
25 long_description_markdown_filename='README.md',
26 author='Vitalik Buterin',
27 author_email='',
28 url='https://github.com/ethereum/vyper',
29 license="MIT",
30 keywords='ethereum',
31 include_package_data=True,
32 packages=find_packages(exclude=('tests', 'docs')),
33 python_requires='>=3.6',
34 py_modules=['vyper'],
35 install_requires=[
36 'pycryptodome>=3.5.1,<4',
37 ],
38 setup_requires=[
39 'pytest-runner',
40 'setuptools-markdown'
41 ],
42 tests_require=test_deps,
43 extras_require=extras,
44 scripts=[
45 'bin/vyper',
46 'bin/vyper-serve',
47 'bin/vyper-lll'
48 ],
49 classifiers=[
50 'Intended Audience :: Developers',
51 'License :: OSI Approved :: MIT License',
52 'Programming Language :: Python :: 3.6',
53 ]
54 )
55
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,11 +4,12 @@
test_deps = [
- 'pytest',
- 'pytest-cov',
- 'py-evm==0.2.0a34',
- 'eth-tester==0.1.0b33',
- 'web3==4.8.2',
+ 'pytest>=3.6',
+ 'pytest-cov==2.4.0',
+ 'pytest-xdist==1.18.1',
+ 'py-evm==0.2.0a39',
+ 'eth-tester==0.1.0b37',
+ 'web3==5.0.0a6'
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,11 +4,12 @@\n \n \n test_deps = [\n- 'pytest',\n- 'pytest-cov',\n- 'py-evm==0.2.0a34',\n- 'eth-tester==0.1.0b33',\n- 'web3==4.8.2',\n+ 'pytest>=3.6',\n+ 'pytest-cov==2.4.0',\n+ 'pytest-xdist==1.18.1',\n+ 'py-evm==0.2.0a39',\n+ 'eth-tester==0.1.0b37',\n+ 'web3==5.0.0a6'\n ]\n", "issue": "State leakage across test runs when using parrellization\n### What is wrong.\r\n\r\nThe tests at `tests/examples/safe_remote_purchase/test_safe_remote_purchase.py` fail when run using `pytest-xdist` to parallelize test runs.\r\n\r\n```\r\n def test_abort(w3, assert_tx_failed, check_balance, get_contract, contract_code):\r\n a0, a1, a2 = w3.eth.accounts[:3]\r\n c = get_contract(contract_code, value=2)\r\n # Only sender can trigger refund\r\n assert_tx_failed(lambda: c.abort(transact={'from': a2}))\r\n # Refund works correctly\r\n c.abort(transact={'from': a0, 'gasPrice': 0})\r\n> assert check_balance() == (INIT_BAL_a0 - w3.toWei(2, 'ether'), INIT_BAL_a1)\r\nE assert (100000000000...0000000000000) == (9999980000000...0000000000000)\r\nE At index 0 diff: 1000000000000000000000000 != 999998000000000000000000\r\nE Use -v to get the full diff\r\n\r\ntests/examples/safe_remote_purchase/test_safe_remote_purchase.py:62: AssertionError\r\n```\r\n\r\nreplicate by installing `pytest-xdist` and running with\r\n\r\n```\r\npytest tests/examples/safe_remote_purchase/test_safe_remote_purchase.py -n 2\r\n```\r\n\r\nIt's likely this isn't deterministic and you may need to run the full suite.\r\n\r\n### How can it be fixed.\r\n\r\nFigure out where statefulness is leaking across test runs and fix it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\ntest_deps = [\n 'pytest',\n 'pytest-cov',\n 'py-evm==0.2.0a34',\n 'eth-tester==0.1.0b33',\n 'web3==4.8.2',\n]\n\n\nextras = {\n 'test': test_deps\n}\n\n\nsetup(\n name='vyper',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='0.1.0-beta.8',\n description='Vyper Programming Language for Ethereum',\n long_description_markdown_filename='README.md',\n author='Vitalik Buterin',\n author_email='',\n url='https://github.com/ethereum/vyper',\n license=\"MIT\",\n keywords='ethereum',\n include_package_data=True,\n packages=find_packages(exclude=('tests', 'docs')),\n python_requires='>=3.6',\n py_modules=['vyper'],\n install_requires=[\n 'pycryptodome>=3.5.1,<4',\n ],\n setup_requires=[\n 'pytest-runner',\n 'setuptools-markdown'\n ],\n tests_require=test_deps,\n extras_require=extras,\n scripts=[\n 'bin/vyper',\n 'bin/vyper-serve',\n 'bin/vyper-lll'\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n ]\n)\n", "path": "setup.py"}]}
| 1,366 | 176 |
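The Vyper record's golden diff only repins the test dependencies and adds `pytest-xdist` to the test deps; the smell named in the issue, state leaking across tests once runs are parallelized, is a general one. Below is a tiny, generic pytest sketch of keeping shared mutable state hermetic with a snapshot-and-restore fixture. It is not Vyper's actual conftest, just an illustration of the idea, and running it with `-n 2` assumes `pytest-xdist` is installed.

```
# test_isolation_sketch.py: run with `pytest test_isolation_sketch.py` or `pytest -n 2 ...`
import pytest

# Module-level mutable state stands in for the shared tester-chain balances that
# leaked between tests in the issue above.
BALANCES = {"a0": 1000000, "a1": 1000000}


@pytest.fixture
def balances():
    snapshot = dict(BALANCES)
    yield BALANCES
    # Restore after every test so results do not depend on ordering or on which
    # worker process happened to run which test.
    BALANCES.clear()
    BALANCES.update(snapshot)


def test_spend(balances):
    balances["a0"] -= 2
    assert balances["a0"] == 999998


def test_fresh_state(balances):
    # Without the restore step above, this assertion would depend on whether
    # test_spend ran first in the same process.
    assert balances["a0"] == 1000000
```

Every test starts from the same balances regardless of ordering or worker assignment, which is the property the failing `check_balance` assertions in the issue rely on.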
gh_patches_debug_4575
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-1127
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixing a pyee DeprecationWarning
pyee.AsyncIOEventEmitter was moved to pyee.asyncio.AsyncIOEventEmitter, so this PR just fixes two imports
</issue>
<code>
[start of setup.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import glob
16 import os
17 import platform
18 import shutil
19 import subprocess
20 import sys
21 import zipfile
22 from pathlib import Path
23 from typing import Dict, List
24
25 from setuptools import find_packages, setup
26
27 try:
28 from auditwheel.wheeltools import InWheel
29 except ImportError:
30 InWheel = None
31 from wheel.bdist_wheel import bdist_wheel as BDistWheelCommand
32
33 driver_version = "1.18.0-beta-1642620709000"
34
35
36 def extractall(zip: zipfile.ZipFile, path: str) -> None:
37 for name in zip.namelist():
38 member = zip.getinfo(name)
39 extracted_path = zip.extract(member, path)
40 attr = member.external_attr >> 16
41 if attr != 0:
42 os.chmod(extracted_path, attr)
43
44
45 def download_driver(zip_name: str) -> None:
46 zip_file = f"playwright-{driver_version}-{zip_name}.zip"
47 if os.path.exists("driver/" + zip_file):
48 return
49 url = "https://playwright.azureedge.net/builds/driver/"
50 if (
51 "-alpha" in driver_version
52 or "-beta" in driver_version
53 or "-next" in driver_version
54 ):
55 url = url + "next/"
56 url = url + zip_file
57 print(f"Fetching {url}")
58 # Don't replace this with urllib - Python won't have certificates to do SSL on all platforms.
59 subprocess.check_call(["curl", url, "-o", "driver/" + zip_file])
60
61
62 class PlaywrightBDistWheelCommand(BDistWheelCommand):
63 user_options = BDistWheelCommand.user_options + [
64 ("all", "a", "create wheels for all platforms")
65 ]
66 boolean_options = BDistWheelCommand.boolean_options + ["all"]
67
68 def initialize_options(self) -> None:
69 super().initialize_options()
70 self.all = False
71
72 def run(self) -> None:
73 shutil.rmtree("build", ignore_errors=True)
74 shutil.rmtree("dist", ignore_errors=True)
75 shutil.rmtree("playwright.egg-info", ignore_errors=True)
76 super().run()
77 os.makedirs("driver", exist_ok=True)
78 os.makedirs("playwright/driver", exist_ok=True)
79 base_wheel_bundles: List[Dict[str, str]] = [
80 {
81 "wheel": "macosx_10_13_x86_64.whl",
82 "machine": "x86_64",
83 "platform": "darwin",
84 "zip_name": "mac",
85 },
86 {
87 "wheel": "macosx_11_0_universal2.whl",
88 "machine": "x86_64",
89 "platform": "darwin",
90 "zip_name": "mac",
91 },
92 {
93 "wheel": "macosx_11_0_arm64.whl",
94 "machine": "arm64",
95 "platform": "darwin",
96 "zip_name": "mac-arm64",
97 },
98 {
99 "wheel": "manylinux1_x86_64.whl",
100 "machine": "x86_64",
101 "platform": "linux",
102 "zip_name": "linux",
103 },
104 {
105 "wheel": "manylinux_2_17_aarch64.manylinux2014_aarch64.whl",
106 "machine": "aarch64",
107 "platform": "linux",
108 "zip_name": "linux-arm64",
109 },
110 {
111 "wheel": "win32.whl",
112 "machine": "i386",
113 "platform": "win32",
114 "zip_name": "win32_x64",
115 },
116 {
117 "wheel": "win_amd64.whl",
118 "machine": "amd64",
119 "platform": "win32",
120 "zip_name": "win32_x64",
121 },
122 ]
123 self._download_and_extract_local_driver(base_wheel_bundles)
124
125 wheels = base_wheel_bundles
126 if not self.all:
127 # Limit to 1, since for MacOS e.g. we have multiple wheels for the same platform and architecture and Conda expects 1.
128 wheels = list(
129 filter(
130 lambda wheel: wheel["platform"] == sys.platform
131 and wheel["machine"] == platform.machine().lower(),
132 base_wheel_bundles,
133 )
134 )[:1]
135 self._build_wheels(wheels)
136
137 def _build_wheels(
138 self,
139 wheels: List[Dict[str, str]],
140 ) -> None:
141 base_wheel_location: str = glob.glob(os.path.join(self.dist_dir, "*.whl"))[0]
142 without_platform = base_wheel_location[:-7]
143 for wheel_bundle in wheels:
144 download_driver(wheel_bundle["zip_name"])
145 zip_file = (
146 f"driver/playwright-{driver_version}-{wheel_bundle['zip_name']}.zip"
147 )
148 with zipfile.ZipFile(zip_file, "r") as zip:
149 extractall(zip, f"driver/{wheel_bundle['zip_name']}")
150 wheel_location = without_platform + wheel_bundle["wheel"]
151 shutil.copy(base_wheel_location, wheel_location)
152 with zipfile.ZipFile(wheel_location, "a") as zip:
153 driver_root = os.path.abspath(f"driver/{wheel_bundle['zip_name']}")
154 for dir_path, _, files in os.walk(driver_root):
155 for file in files:
156 from_path = os.path.join(dir_path, file)
157 to_path = os.path.relpath(from_path, driver_root)
158 zip.write(from_path, f"playwright/driver/{to_path}")
159 zip.writestr(
160 "playwright/driver/README.md",
161 f"{wheel_bundle['wheel']} driver package",
162 )
163 os.remove(base_wheel_location)
164 if InWheel:
165 for whlfile in glob.glob(os.path.join(self.dist_dir, "*.whl")):
166 os.makedirs("wheelhouse", exist_ok=True)
167 with InWheel(
168 in_wheel=whlfile,
169 out_wheel=os.path.join("wheelhouse", os.path.basename(whlfile)),
170 ):
171 print(f"Updating RECORD file of {whlfile}")
172 shutil.rmtree(self.dist_dir)
173 print("Copying new wheels")
174 shutil.move("wheelhouse", self.dist_dir)
175 else:
176 print("auditwheel not installed, not updating RECORD file")
177
178 def _download_and_extract_local_driver(
179 self,
180 wheels: List[Dict[str, str]],
181 ) -> None:
182 zip_names_for_current_system = set(
183 map(
184 lambda wheel: wheel["zip_name"],
185 filter(
186 lambda wheel: wheel["machine"] == platform.machine().lower()
187 and wheel["platform"] == sys.platform,
188 wheels,
189 ),
190 )
191 )
192 assert len(zip_names_for_current_system) == 1
193 zip_name = zip_names_for_current_system.pop()
194 download_driver(zip_name)
195 zip_file = f"driver/playwright-{driver_version}-{zip_name}.zip"
196 with zipfile.ZipFile(zip_file, "r") as zip:
197 extractall(zip, "playwright/driver")
198
199
200 setup(
201 name="playwright",
202 author="Microsoft Corporation",
203 author_email="",
204 description="A high-level API to automate web browsers",
205 long_description=Path("README.md").read_text(encoding="utf-8"),
206 long_description_content_type="text/markdown",
207 url="https://github.com/Microsoft/playwright-python",
208 packages=find_packages(exclude=["tests*"]),
209 include_package_data=True,
210 install_requires=[
211 "websockets>=8.1",
212 "greenlet>=1.0.0",
213 "pyee>=8.0.1",
214 "typing-extensions;python_version<='3.8'",
215 ],
216 classifiers=[
217 "Topic :: Software Development :: Testing",
218 "Topic :: Internet :: WWW/HTTP :: Browsers",
219 "Intended Audience :: Developers",
220 "Programming Language :: Python :: 3",
221 "Programming Language :: Python :: 3.7",
222 "Programming Language :: Python :: 3.8",
223 "Programming Language :: Python :: 3.9",
224 "Programming Language :: Python :: 3.10",
225 "License :: OSI Approved :: Apache Software License",
226 "Operating System :: OS Independent",
227 ],
228 python_requires=">=3.7",
229 cmdclass={"bdist_wheel": PlaywrightBDistWheelCommand},
230 use_scm_version={
231 "version_scheme": "post-release",
232 "write_to": "playwright/_repo_version.py",
233 "write_to_template": 'version = "{version}"\n',
234 },
235 setup_requires=["setuptools-scm==6.3.2", "wheel==0.37.0"],
236 entry_points={
237 "console_scripts": [
238 "playwright=playwright.__main__:main",
239 ],
240 "pyinstaller40": ["hook-dirs=playwright._impl.__pyinstaller:get_hook_dirs"],
241 },
242 )
243
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -208,9 +208,9 @@
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
install_requires=[
- "websockets>=8.1",
- "greenlet>=1.0.0",
- "pyee>=8.0.1",
+ "websockets==10.1",
+ "greenlet==1.1.2",
+ "pyee==8.1.0",
"typing-extensions;python_version<='3.8'",
],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -208,9 +208,9 @@\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n install_requires=[\n- \"websockets>=8.1\",\n- \"greenlet>=1.0.0\",\n- \"pyee>=8.0.1\",\n+ \"websockets==10.1\",\n+ \"greenlet==1.1.2\",\n+ \"pyee==8.1.0\",\n \"typing-extensions;python_version<='3.8'\",\n ],\n classifiers=[\n", "issue": "Fixing a pyee DeprecationWarning\npyee.AsyncIOEventEmitter was moved to pyee.asyncio.AsyncIOEventEmitter, so this PR just fixes two imports\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nimport zipfile\nfrom pathlib import Path\nfrom typing import Dict, List\n\nfrom setuptools import find_packages, setup\n\ntry:\n from auditwheel.wheeltools import InWheel\nexcept ImportError:\n InWheel = None\nfrom wheel.bdist_wheel import bdist_wheel as BDistWheelCommand\n\ndriver_version = \"1.18.0-beta-1642620709000\"\n\n\ndef extractall(zip: zipfile.ZipFile, path: str) -> None:\n for name in zip.namelist():\n member = zip.getinfo(name)\n extracted_path = zip.extract(member, path)\n attr = member.external_attr >> 16\n if attr != 0:\n os.chmod(extracted_path, attr)\n\n\ndef download_driver(zip_name: str) -> None:\n zip_file = f\"playwright-{driver_version}-{zip_name}.zip\"\n if os.path.exists(\"driver/\" + zip_file):\n return\n url = \"https://playwright.azureedge.net/builds/driver/\"\n if (\n \"-alpha\" in driver_version\n or \"-beta\" in driver_version\n or \"-next\" in driver_version\n ):\n url = url + \"next/\"\n url = url + zip_file\n print(f\"Fetching {url}\")\n # Don't replace this with urllib - Python won't have certificates to do SSL on all platforms.\n subprocess.check_call([\"curl\", url, \"-o\", \"driver/\" + zip_file])\n\n\nclass PlaywrightBDistWheelCommand(BDistWheelCommand):\n user_options = BDistWheelCommand.user_options + [\n (\"all\", \"a\", \"create wheels for all platforms\")\n ]\n boolean_options = BDistWheelCommand.boolean_options + [\"all\"]\n\n def initialize_options(self) -> None:\n super().initialize_options()\n self.all = False\n\n def run(self) -> None:\n shutil.rmtree(\"build\", ignore_errors=True)\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"playwright.egg-info\", ignore_errors=True)\n super().run()\n os.makedirs(\"driver\", exist_ok=True)\n os.makedirs(\"playwright/driver\", exist_ok=True)\n base_wheel_bundles: List[Dict[str, str]] = [\n {\n \"wheel\": \"macosx_10_13_x86_64.whl\",\n \"machine\": \"x86_64\",\n \"platform\": \"darwin\",\n \"zip_name\": \"mac\",\n },\n {\n \"wheel\": \"macosx_11_0_universal2.whl\",\n \"machine\": \"x86_64\",\n \"platform\": \"darwin\",\n \"zip_name\": \"mac\",\n },\n {\n \"wheel\": \"macosx_11_0_arm64.whl\",\n \"machine\": \"arm64\",\n \"platform\": \"darwin\",\n \"zip_name\": \"mac-arm64\",\n },\n {\n \"wheel\": \"manylinux1_x86_64.whl\",\n 
\"machine\": \"x86_64\",\n \"platform\": \"linux\",\n \"zip_name\": \"linux\",\n },\n {\n \"wheel\": \"manylinux_2_17_aarch64.manylinux2014_aarch64.whl\",\n \"machine\": \"aarch64\",\n \"platform\": \"linux\",\n \"zip_name\": \"linux-arm64\",\n },\n {\n \"wheel\": \"win32.whl\",\n \"machine\": \"i386\",\n \"platform\": \"win32\",\n \"zip_name\": \"win32_x64\",\n },\n {\n \"wheel\": \"win_amd64.whl\",\n \"machine\": \"amd64\",\n \"platform\": \"win32\",\n \"zip_name\": \"win32_x64\",\n },\n ]\n self._download_and_extract_local_driver(base_wheel_bundles)\n\n wheels = base_wheel_bundles\n if not self.all:\n # Limit to 1, since for MacOS e.g. we have multiple wheels for the same platform and architecture and Conda expects 1.\n wheels = list(\n filter(\n lambda wheel: wheel[\"platform\"] == sys.platform\n and wheel[\"machine\"] == platform.machine().lower(),\n base_wheel_bundles,\n )\n )[:1]\n self._build_wheels(wheels)\n\n def _build_wheels(\n self,\n wheels: List[Dict[str, str]],\n ) -> None:\n base_wheel_location: str = glob.glob(os.path.join(self.dist_dir, \"*.whl\"))[0]\n without_platform = base_wheel_location[:-7]\n for wheel_bundle in wheels:\n download_driver(wheel_bundle[\"zip_name\"])\n zip_file = (\n f\"driver/playwright-{driver_version}-{wheel_bundle['zip_name']}.zip\"\n )\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, f\"driver/{wheel_bundle['zip_name']}\")\n wheel_location = without_platform + wheel_bundle[\"wheel\"]\n shutil.copy(base_wheel_location, wheel_location)\n with zipfile.ZipFile(wheel_location, \"a\") as zip:\n driver_root = os.path.abspath(f\"driver/{wheel_bundle['zip_name']}\")\n for dir_path, _, files in os.walk(driver_root):\n for file in files:\n from_path = os.path.join(dir_path, file)\n to_path = os.path.relpath(from_path, driver_root)\n zip.write(from_path, f\"playwright/driver/{to_path}\")\n zip.writestr(\n \"playwright/driver/README.md\",\n f\"{wheel_bundle['wheel']} driver package\",\n )\n os.remove(base_wheel_location)\n if InWheel:\n for whlfile in glob.glob(os.path.join(self.dist_dir, \"*.whl\")):\n os.makedirs(\"wheelhouse\", exist_ok=True)\n with InWheel(\n in_wheel=whlfile,\n out_wheel=os.path.join(\"wheelhouse\", os.path.basename(whlfile)),\n ):\n print(f\"Updating RECORD file of {whlfile}\")\n shutil.rmtree(self.dist_dir)\n print(\"Copying new wheels\")\n shutil.move(\"wheelhouse\", self.dist_dir)\n else:\n print(\"auditwheel not installed, not updating RECORD file\")\n\n def _download_and_extract_local_driver(\n self,\n wheels: List[Dict[str, str]],\n ) -> None:\n zip_names_for_current_system = set(\n map(\n lambda wheel: wheel[\"zip_name\"],\n filter(\n lambda wheel: wheel[\"machine\"] == platform.machine().lower()\n and wheel[\"platform\"] == sys.platform,\n wheels,\n ),\n )\n )\n assert len(zip_names_for_current_system) == 1\n zip_name = zip_names_for_current_system.pop()\n download_driver(zip_name)\n zip_file = f\"driver/playwright-{driver_version}-{zip_name}.zip\"\n with zipfile.ZipFile(zip_file, \"r\") as zip:\n extractall(zip, \"playwright/driver\")\n\n\nsetup(\n name=\"playwright\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=Path(\"README.md\").read_text(encoding=\"utf-8\"),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n install_requires=[\n \"websockets>=8.1\",\n \"greenlet>=1.0.0\",\n 
\"pyee>=8.0.1\",\n \"typing-extensions;python_version<='3.8'\",\n ],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n cmdclass={\"bdist_wheel\": PlaywrightBDistWheelCommand},\n use_scm_version={\n \"version_scheme\": \"post-release\",\n \"write_to\": \"playwright/_repo_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n setup_requires=[\"setuptools-scm==6.3.2\", \"wheel==0.37.0\"],\n entry_points={\n \"console_scripts\": [\n \"playwright=playwright.__main__:main\",\n ],\n \"pyinstaller40\": [\"hook-dirs=playwright._impl.__pyinstaller:get_hook_dirs\"],\n },\n)\n", "path": "setup.py"}]}
| 3,248 | 142 |
gh_patches_debug_17164
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-21658
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Mixer] VOD parsing fails
Vods now also include "-", e.g. https://mixer.com/TheViper?vod=Rh3LY0VAqkGpEQUe2pN-ig
This is not correctly parsed by the `_VALID_URL` regex.
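
A small standalone illustration of the problem, assuming the id group only needs to accept anything up to a URL delimiter (the extractor's real pattern is longer than this sketch):

```python
import re

url = 'https://mixer.com/TheViper?vod=Rh3LY0VAqkGpEQUe2pN-ig'

# \w+ stops at the "-", so the id comes back truncated
narrow = re.search(r'\bvod=(?P<id>\w+)', url)
assert narrow.group('id') == 'Rh3LY0VAqkGpEQUe2pN'

# A character class that excludes URL delimiters captures the full id
relaxed = re.search(r'\bvod=(?P<id>[^?#&]+)', url)
assert relaxed.group('id') == 'Rh3LY0VAqkGpEQUe2pN-ig'
```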
</issue>
<code>
[start of youtube_dl/extractor/beampro.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 from ..utils import (
6 ExtractorError,
7 clean_html,
8 compat_str,
9 float_or_none,
10 int_or_none,
11 parse_iso8601,
12 try_get,
13 urljoin,
14 )
15
16
17 class BeamProBaseIE(InfoExtractor):
18 _API_BASE = 'https://mixer.com/api/v1'
19 _RATINGS = {'family': 0, 'teen': 13, '18+': 18}
20
21 def _extract_channel_info(self, chan):
22 user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id'])
23 return {
24 'uploader': chan.get('token') or try_get(
25 chan, lambda x: x['user']['username'], compat_str),
26 'uploader_id': compat_str(user_id) if user_id else None,
27 'age_limit': self._RATINGS.get(chan.get('audience')),
28 }
29
30
31 class BeamProLiveIE(BeamProBaseIE):
32 IE_NAME = 'Mixer:live'
33 _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/(?P<id>[^/?#&]+)'
34 _TEST = {
35 'url': 'http://mixer.com/niterhayven',
36 'info_dict': {
37 'id': '261562',
38 'ext': 'mp4',
39 'title': 'Introducing The Witcher 3 // The Grind Starts Now!',
40 'description': 'md5:0b161ac080f15fe05d18a07adb44a74d',
41 'thumbnail': r're:https://.*\.jpg$',
42 'timestamp': 1483477281,
43 'upload_date': '20170103',
44 'uploader': 'niterhayven',
45 'uploader_id': '373396',
46 'age_limit': 18,
47 'is_live': True,
48 'view_count': int,
49 },
50 'skip': 'niterhayven is offline',
51 'params': {
52 'skip_download': True,
53 },
54 }
55
56 _MANIFEST_URL_TEMPLATE = '%s/channels/%%s/manifest.%%s' % BeamProBaseIE._API_BASE
57
58 @classmethod
59 def suitable(cls, url):
60 return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url)
61
62 def _real_extract(self, url):
63 channel_name = self._match_id(url)
64
65 chan = self._download_json(
66 '%s/channels/%s' % (self._API_BASE, channel_name), channel_name)
67
68 if chan.get('online') is False:
69 raise ExtractorError(
70 '{0} is offline'.format(channel_name), expected=True)
71
72 channel_id = chan['id']
73
74 def manifest_url(kind):
75 return self._MANIFEST_URL_TEMPLATE % (channel_id, kind)
76
77 formats = self._extract_m3u8_formats(
78 manifest_url('m3u8'), channel_name, ext='mp4', m3u8_id='hls',
79 fatal=False)
80 formats.extend(self._extract_smil_formats(
81 manifest_url('smil'), channel_name, fatal=False))
82 self._sort_formats(formats)
83
84 info = {
85 'id': compat_str(chan.get('id') or channel_name),
86 'title': self._live_title(chan.get('name') or channel_name),
87 'description': clean_html(chan.get('description')),
88 'thumbnail': try_get(
89 chan, lambda x: x['thumbnail']['url'], compat_str),
90 'timestamp': parse_iso8601(chan.get('updatedAt')),
91 'is_live': True,
92 'view_count': int_or_none(chan.get('viewersTotal')),
93 'formats': formats,
94 }
95 info.update(self._extract_channel_info(chan))
96
97 return info
98
99
100 class BeamProVodIE(BeamProBaseIE):
101 IE_NAME = 'Mixer:vod'
102 _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>\w+)'
103 _TESTS = [{
104 'url': 'https://mixer.com/willow8714?vod=2259830',
105 'md5': 'b2431e6e8347dc92ebafb565d368b76b',
106 'info_dict': {
107 'id': '2259830',
108 'ext': 'mp4',
109 'title': 'willow8714\'s Channel',
110 'duration': 6828.15,
111 'thumbnail': r're:https://.*source\.png$',
112 'timestamp': 1494046474,
113 'upload_date': '20170506',
114 'uploader': 'willow8714',
115 'uploader_id': '6085379',
116 'age_limit': 13,
117 'view_count': int,
118 },
119 'params': {
120 'skip_download': True,
121 },
122 }, {
123 'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',
124 'only_matching': True,
125 }]
126
127 @staticmethod
128 def _extract_format(vod, vod_type):
129 if not vod.get('baseUrl'):
130 return []
131
132 if vod_type == 'hls':
133 filename, protocol = 'manifest.m3u8', 'm3u8_native'
134 elif vod_type == 'raw':
135 filename, protocol = 'source.mp4', 'https'
136 else:
137 assert False
138
139 data = vod.get('data') if isinstance(vod.get('data'), dict) else {}
140
141 format_id = [vod_type]
142 if isinstance(data.get('Height'), compat_str):
143 format_id.append('%sp' % data['Height'])
144
145 return [{
146 'url': urljoin(vod['baseUrl'], filename),
147 'format_id': '-'.join(format_id),
148 'ext': 'mp4',
149 'protocol': protocol,
150 'width': int_or_none(data.get('Width')),
151 'height': int_or_none(data.get('Height')),
152 'fps': int_or_none(data.get('Fps')),
153 'tbr': int_or_none(data.get('Bitrate'), 1000),
154 }]
155
156 def _real_extract(self, url):
157 vod_id = self._match_id(url)
158
159 vod_info = self._download_json(
160 '%s/recordings/%s' % (self._API_BASE, vod_id), vod_id)
161
162 state = vod_info.get('state')
163 if state != 'AVAILABLE':
164 raise ExtractorError(
165 'VOD %s is not available (state: %s)' % (vod_id, state),
166 expected=True)
167
168 formats = []
169 thumbnail_url = None
170
171 for vod in vod_info['vods']:
172 vod_type = vod.get('format')
173 if vod_type in ('hls', 'raw'):
174 formats.extend(self._extract_format(vod, vod_type))
175 elif vod_type == 'thumbnail':
176 thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png')
177
178 self._sort_formats(formats)
179
180 info = {
181 'id': vod_id,
182 'title': vod_info.get('name') or vod_id,
183 'duration': float_or_none(vod_info.get('duration')),
184 'thumbnail': thumbnail_url,
185 'timestamp': parse_iso8601(vod_info.get('createdAt')),
186 'view_count': int_or_none(vod_info.get('viewsTotal')),
187 'formats': formats,
188 }
189 info.update(self._extract_channel_info(vod_info.get('channel') or {}))
190
191 return info
192
[end of youtube_dl/extractor/beampro.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/youtube_dl/extractor/beampro.py b/youtube_dl/extractor/beampro.py
--- a/youtube_dl/extractor/beampro.py
+++ b/youtube_dl/extractor/beampro.py
@@ -99,7 +99,7 @@
class BeamProVodIE(BeamProBaseIE):
IE_NAME = 'Mixer:vod'
- _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>\w+)'
+ _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>[^?#&]+)'
_TESTS = [{
'url': 'https://mixer.com/willow8714?vod=2259830',
'md5': 'b2431e6e8347dc92ebafb565d368b76b',
@@ -122,6 +122,9 @@
}, {
'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',
'only_matching': True,
+ }, {
+ 'url': 'https://mixer.com/streamer?vod=Rh3LY0VAqkGpEQUe2pN-ig',
+ 'only_matching': True,
}]
@staticmethod
|
{"golden_diff": "diff --git a/youtube_dl/extractor/beampro.py b/youtube_dl/extractor/beampro.py\n--- a/youtube_dl/extractor/beampro.py\n+++ b/youtube_dl/extractor/beampro.py\n@@ -99,7 +99,7 @@\n \n class BeamProVodIE(BeamProBaseIE):\n IE_NAME = 'Mixer:vod'\n- _VALID_URL = r'https?://(?:\\w+\\.)?(?:beam\\.pro|mixer\\.com)/[^/?#&]+\\?.*?\\bvod=(?P<id>\\w+)'\n+ _VALID_URL = r'https?://(?:\\w+\\.)?(?:beam\\.pro|mixer\\.com)/[^/?#&]+\\?.*?\\bvod=(?P<id>[^?#&]+)'\n _TESTS = [{\n 'url': 'https://mixer.com/willow8714?vod=2259830',\n 'md5': 'b2431e6e8347dc92ebafb565d368b76b',\n@@ -122,6 +122,9 @@\n }, {\n 'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://mixer.com/streamer?vod=Rh3LY0VAqkGpEQUe2pN-ig',\n+ 'only_matching': True,\n }]\n \n @staticmethod\n", "issue": "[Mixer] VOD parsing fails\nVods now also include \"-\" i.e.: https://mixer.com/TheViper?vod=Rh3LY0VAqkGpEQUe2pN-ig\r\n\r\nThis is not correctly parsed by the `_VALID_URL` regex.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n clean_html,\n compat_str,\n float_or_none,\n int_or_none,\n parse_iso8601,\n try_get,\n urljoin,\n)\n\n\nclass BeamProBaseIE(InfoExtractor):\n _API_BASE = 'https://mixer.com/api/v1'\n _RATINGS = {'family': 0, 'teen': 13, '18+': 18}\n\n def _extract_channel_info(self, chan):\n user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id'])\n return {\n 'uploader': chan.get('token') or try_get(\n chan, lambda x: x['user']['username'], compat_str),\n 'uploader_id': compat_str(user_id) if user_id else None,\n 'age_limit': self._RATINGS.get(chan.get('audience')),\n }\n\n\nclass BeamProLiveIE(BeamProBaseIE):\n IE_NAME = 'Mixer:live'\n _VALID_URL = r'https?://(?:\\w+\\.)?(?:beam\\.pro|mixer\\.com)/(?P<id>[^/?#&]+)'\n _TEST = {\n 'url': 'http://mixer.com/niterhayven',\n 'info_dict': {\n 'id': '261562',\n 'ext': 'mp4',\n 'title': 'Introducing The Witcher 3 // The Grind Starts Now!',\n 'description': 'md5:0b161ac080f15fe05d18a07adb44a74d',\n 'thumbnail': r're:https://.*\\.jpg$',\n 'timestamp': 1483477281,\n 'upload_date': '20170103',\n 'uploader': 'niterhayven',\n 'uploader_id': '373396',\n 'age_limit': 18,\n 'is_live': True,\n 'view_count': int,\n },\n 'skip': 'niterhayven is offline',\n 'params': {\n 'skip_download': True,\n },\n }\n\n _MANIFEST_URL_TEMPLATE = '%s/channels/%%s/manifest.%%s' % BeamProBaseIE._API_BASE\n\n @classmethod\n def suitable(cls, url):\n return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url)\n\n def _real_extract(self, url):\n channel_name = self._match_id(url)\n\n chan = self._download_json(\n '%s/channels/%s' % (self._API_BASE, channel_name), channel_name)\n\n if chan.get('online') is False:\n raise ExtractorError(\n '{0} is offline'.format(channel_name), expected=True)\n\n channel_id = chan['id']\n\n def manifest_url(kind):\n return self._MANIFEST_URL_TEMPLATE % (channel_id, kind)\n\n formats = self._extract_m3u8_formats(\n manifest_url('m3u8'), channel_name, ext='mp4', m3u8_id='hls',\n fatal=False)\n formats.extend(self._extract_smil_formats(\n manifest_url('smil'), channel_name, fatal=False))\n self._sort_formats(formats)\n\n info = {\n 'id': compat_str(chan.get('id') or channel_name),\n 'title': self._live_title(chan.get('name') or channel_name),\n 'description': clean_html(chan.get('description')),\n 'thumbnail': try_get(\n chan, lambda x: x['thumbnail']['url'], 
compat_str),\n 'timestamp': parse_iso8601(chan.get('updatedAt')),\n 'is_live': True,\n 'view_count': int_or_none(chan.get('viewersTotal')),\n 'formats': formats,\n }\n info.update(self._extract_channel_info(chan))\n\n return info\n\n\nclass BeamProVodIE(BeamProBaseIE):\n IE_NAME = 'Mixer:vod'\n _VALID_URL = r'https?://(?:\\w+\\.)?(?:beam\\.pro|mixer\\.com)/[^/?#&]+\\?.*?\\bvod=(?P<id>\\w+)'\n _TESTS = [{\n 'url': 'https://mixer.com/willow8714?vod=2259830',\n 'md5': 'b2431e6e8347dc92ebafb565d368b76b',\n 'info_dict': {\n 'id': '2259830',\n 'ext': 'mp4',\n 'title': 'willow8714\\'s Channel',\n 'duration': 6828.15,\n 'thumbnail': r're:https://.*source\\.png$',\n 'timestamp': 1494046474,\n 'upload_date': '20170506',\n 'uploader': 'willow8714',\n 'uploader_id': '6085379',\n 'age_limit': 13,\n 'view_count': int,\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _extract_format(vod, vod_type):\n if not vod.get('baseUrl'):\n return []\n\n if vod_type == 'hls':\n filename, protocol = 'manifest.m3u8', 'm3u8_native'\n elif vod_type == 'raw':\n filename, protocol = 'source.mp4', 'https'\n else:\n assert False\n\n data = vod.get('data') if isinstance(vod.get('data'), dict) else {}\n\n format_id = [vod_type]\n if isinstance(data.get('Height'), compat_str):\n format_id.append('%sp' % data['Height'])\n\n return [{\n 'url': urljoin(vod['baseUrl'], filename),\n 'format_id': '-'.join(format_id),\n 'ext': 'mp4',\n 'protocol': protocol,\n 'width': int_or_none(data.get('Width')),\n 'height': int_or_none(data.get('Height')),\n 'fps': int_or_none(data.get('Fps')),\n 'tbr': int_or_none(data.get('Bitrate'), 1000),\n }]\n\n def _real_extract(self, url):\n vod_id = self._match_id(url)\n\n vod_info = self._download_json(\n '%s/recordings/%s' % (self._API_BASE, vod_id), vod_id)\n\n state = vod_info.get('state')\n if state != 'AVAILABLE':\n raise ExtractorError(\n 'VOD %s is not available (state: %s)' % (vod_id, state),\n expected=True)\n\n formats = []\n thumbnail_url = None\n\n for vod in vod_info['vods']:\n vod_type = vod.get('format')\n if vod_type in ('hls', 'raw'):\n formats.extend(self._extract_format(vod, vod_type))\n elif vod_type == 'thumbnail':\n thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png')\n\n self._sort_formats(formats)\n\n info = {\n 'id': vod_id,\n 'title': vod_info.get('name') or vod_id,\n 'duration': float_or_none(vod_info.get('duration')),\n 'thumbnail': thumbnail_url,\n 'timestamp': parse_iso8601(vod_info.get('createdAt')),\n 'view_count': int_or_none(vod_info.get('viewsTotal')),\n 'formats': formats,\n }\n info.update(self._extract_channel_info(vod_info.get('channel') or {}))\n\n return info\n", "path": "youtube_dl/extractor/beampro.py"}]}
| 2,860 | 359 |
gh_patches_debug_217
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-3701
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 4293: can't edit polls somebody else created even if I have the rights
**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24-4/poll/
**user:** group member
**expected behaviour:** I can edit polls somebody else created if I have the right to do so
**behaviour:** cannot save, getting a red alert
**important screensize:**
**device & browser:**
 **Comment/Question:** also true for new polls whose rights have been given to me. For polls I started myself it is fine.

Screenshot?
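
For context, the permission is declared with `rules.set_perm`; a sketch of granting it at the project-admin level instead, assuming that predicate also covers invited group members, could look like this:

```python
import rules

from adhocracy4.modules import predicates as module_predicates

# Hypothetical sketch: allow anyone with project-admin rights to edit the poll,
# instead of restricting it to the context initiator or moderator.
rules.set_perm(
    'a4polls.change_poll',
    module_predicates.is_project_admin,
)
```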
</issue>
<code>
[start of meinberlin/apps/polls/rules.py]
1 import rules
2
3 from adhocracy4.modules import predicates as module_predicates
4
5 rules.set_perm(
6 'a4polls.change_poll',
7 module_predicates.is_context_initiator |
8 module_predicates.is_context_moderator
9 )
10
[end of meinberlin/apps/polls/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/polls/rules.py b/meinberlin/apps/polls/rules.py
--- a/meinberlin/apps/polls/rules.py
+++ b/meinberlin/apps/polls/rules.py
@@ -4,6 +4,5 @@
rules.set_perm(
'a4polls.change_poll',
- module_predicates.is_context_initiator |
- module_predicates.is_context_moderator
+ module_predicates.is_project_admin
)
|
{"golden_diff": "diff --git a/meinberlin/apps/polls/rules.py b/meinberlin/apps/polls/rules.py\n--- a/meinberlin/apps/polls/rules.py\n+++ b/meinberlin/apps/polls/rules.py\n@@ -4,6 +4,5 @@\n \n rules.set_perm(\n 'a4polls.change_poll',\n- module_predicates.is_context_initiator |\n- module_predicates.is_context_moderator\n+ module_predicates.is_project_admin\n )\n", "issue": "testing 4293: can't edit polls somebody else created even if I have the rights\n**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24-4/poll/\r\n**user:** group member\r\n**expected behaviour:** I can edit polls somebody else created if I have the right to do so\r\n**behaviour:** cannot save, getting an red altert\r\n**important screensize:**\r\n**device & browser:** \r\n **Comment/Question:** also true for new polls whose rights have been given to me. for polls I started myself it is fine.\r\n\r\n\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "import rules\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nrules.set_perm(\n 'a4polls.change_poll',\n module_predicates.is_context_initiator |\n module_predicates.is_context_moderator\n)\n", "path": "meinberlin/apps/polls/rules.py"}]}
| 819 | 103 |
gh_patches_debug_14780
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-30922
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow alerting based on `sdk.name`
### Problem Statement
We're in a React Native project and we'd like the ability to have separate reporting rules based on the `sdk.name`. Right now we're looking very closely at any errors which are reported from `sentry.cocoa` and we would like to send them to a channel where the dedicated iOS engineers can look at them without being bothered by the issues that `sentry.javascript` generates (we have a separate team looking closely at any JS errors).
When I went to go set up a reporting rule, I noticed that there's no way to filter based on `sdk.name`:

### Solution Brainstorm
Hopefully it would be pretty easy to add this new option to the dropdown 🤞
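
A rough standalone sketch of what resolving such an attribute involves, assuming events carry the usual `sdk: {name, version}` payload (the helper name below is illustrative, not Sentry's API):

```python
def resolve_sdk_name(event_data: dict) -> list:
    """Return the reporting SDK's name for an "sdk.name" rule attribute."""
    sdk = event_data.get("sdk") or {}
    name = sdk.get("name")
    return [name] if name is not None else []

# An event reported by the iOS SDK vs. one reported by the JavaScript SDK
assert resolve_sdk_name({"sdk": {"name": "sentry.cocoa"}}) == ["sentry.cocoa"]
assert resolve_sdk_name({"sdk": {"name": "sentry.javascript"}}) == ["sentry.javascript"]
assert resolve_sdk_name({}) == []
```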
</issue>
<code>
[start of src/sentry/rules/conditions/event_attribute.py]
1 from collections import OrderedDict
2
3 from django import forms
4
5 from sentry.rules.conditions.base import EventCondition
6
7
8 class MatchType:
9 EQUAL = "eq"
10 NOT_EQUAL = "ne"
11 STARTS_WITH = "sw"
12 NOT_STARTS_WITH = "nsw"
13 ENDS_WITH = "ew"
14 NOT_ENDS_WITH = "new"
15 CONTAINS = "co"
16 NOT_CONTAINS = "nc"
17 IS_SET = "is"
18 NOT_SET = "ns"
19
20
21 MATCH_CHOICES = OrderedDict(
22 [
23 (MatchType.EQUAL, "equals"),
24 (MatchType.NOT_EQUAL, "does not equal"),
25 (MatchType.STARTS_WITH, "starts with"),
26 (MatchType.NOT_STARTS_WITH, "does not start with"),
27 (MatchType.ENDS_WITH, "ends with"),
28 (MatchType.NOT_ENDS_WITH, "does not end with"),
29 (MatchType.CONTAINS, "contains"),
30 (MatchType.NOT_CONTAINS, "does not contain"),
31 (MatchType.IS_SET, "is set"),
32 (MatchType.NOT_SET, "is not set"),
33 ]
34 )
35
36 ATTR_CHOICES = [
37 "message",
38 "platform",
39 "environment",
40 "type",
41 "exception.type",
42 "exception.value",
43 "user.id",
44 "user.email",
45 "user.username",
46 "user.ip_address",
47 "http.method",
48 "http.url",
49 "stacktrace.code",
50 "stacktrace.module",
51 "stacktrace.filename",
52 "stacktrace.abs_path",
53 "stacktrace.package",
54 ]
55
56
57 class EventAttributeForm(forms.Form):
58 attribute = forms.ChoiceField(choices=[(a, a) for a in ATTR_CHOICES])
59 match = forms.ChoiceField(choices=list(MATCH_CHOICES.items()))
60 value = forms.CharField(widget=forms.TextInput(), required=False)
61
62
63 class EventAttributeCondition(EventCondition):
64 """
65 Attributes are a mapping of <logical-key>.<property>.
66
67 For example:
68
69 - message
70 - platform
71 - exception.{type,value}
72 - user.{id,ip_address,email,FIELD}
73 - http.{method,url}
74 - stacktrace.{code,module,filename,abs_path,package}
75 - extra.{FIELD}
76 """
77
78 # TODO(dcramer): add support for stacktrace.vars.[name]
79
80 form_cls = EventAttributeForm
81 label = "The event's {attribute} value {match} {value}"
82
83 form_fields = {
84 "attribute": {
85 "type": "choice",
86 "placeholder": "i.e. exception.type",
87 "choices": [[a, a] for a in ATTR_CHOICES],
88 },
89 "match": {"type": "choice", "choices": list(MATCH_CHOICES.items())},
90 "value": {"type": "string", "placeholder": "value"},
91 }
92
93 def _get_attribute_values(self, event, attr):
94 # TODO(dcramer): we should validate attributes (when we can) before
95 path = attr.split(".")
96
97 if path[0] == "platform":
98 if len(path) != 1:
99 return []
100 return [event.platform]
101
102 if path[0] == "message":
103 if len(path) != 1:
104 return []
105 return [event.message, event.search_message]
106 elif path[0] == "environment":
107 return [event.get_tag("environment")]
108
109 elif path[0] == "type":
110 return [event.data["type"]]
111
112 elif len(path) == 1:
113 return []
114
115 elif path[0] == "extra":
116 path.pop(0)
117 value = event.data["extra"]
118 while path:
119 bit = path.pop(0)
120 value = value.get(bit)
121 if not value:
122 return []
123
124 if isinstance(value, (list, tuple)):
125 return value
126 return [value]
127
128 elif len(path) != 2:
129 return []
130
131 elif path[0] == "exception":
132 if path[1] not in ("type", "value"):
133 return []
134
135 return [getattr(e, path[1]) for e in event.interfaces["exception"].values]
136
137 elif path[0] == "user":
138 if path[1] in ("id", "ip_address", "email", "username"):
139 return [getattr(event.interfaces["user"], path[1])]
140 return [getattr(event.interfaces["user"].data, path[1])]
141
142 elif path[0] == "http":
143 if path[1] not in ("url", "method"):
144 return []
145
146 return [getattr(event.interfaces["request"], path[1])]
147
148 elif path[0] == "stacktrace":
149 stacks = event.interfaces.get("stacktrace")
150 if stacks:
151 stacks = [stacks]
152 else:
153 stacks = [
154 e.stacktrace for e in event.interfaces["exception"].values if e.stacktrace
155 ]
156 result = []
157 for st in stacks:
158 for frame in st.frames:
159 if path[1] in ("filename", "module", "abs_path", "package"):
160 result.append(getattr(frame, path[1]))
161 elif path[1] == "code":
162 if frame.pre_context:
163 result.extend(frame.pre_context)
164 if frame.context_line:
165 result.append(frame.context_line)
166 if frame.post_context:
167 result.extend(frame.post_context)
168 return result
169 return []
170
171 def render_label(self):
172 data = {
173 "attribute": self.data["attribute"],
174 "value": self.data["value"],
175 "match": MATCH_CHOICES[self.data["match"]],
176 }
177 return self.label.format(**data)
178
179 def passes(self, event, state, **kwargs):
180 attr = self.get_option("attribute")
181 match = self.get_option("match")
182 value = self.get_option("value")
183
184 if not (attr and match and value):
185 return False
186
187 value = value.lower()
188 attr = attr.lower()
189
190 try:
191 attribute_values = self._get_attribute_values(event, attr)
192 except KeyError:
193 attribute_values = []
194
195 attribute_values = [str(v).lower() for v in attribute_values if v is not None]
196
197 if match == MatchType.EQUAL:
198 for a_value in attribute_values:
199 if a_value == value:
200 return True
201 return False
202
203 elif match == MatchType.NOT_EQUAL:
204 for a_value in attribute_values:
205 if a_value == value:
206 return False
207 return True
208
209 elif match == MatchType.STARTS_WITH:
210 for a_value in attribute_values:
211 if a_value.startswith(value):
212 return True
213 return False
214
215 elif match == MatchType.NOT_STARTS_WITH:
216 for a_value in attribute_values:
217 if a_value.startswith(value):
218 return False
219 return True
220
221 elif match == MatchType.ENDS_WITH:
222 for a_value in attribute_values:
223 if a_value.endswith(value):
224 return True
225 return False
226
227 elif match == MatchType.NOT_ENDS_WITH:
228 for a_value in attribute_values:
229 if a_value.endswith(value):
230 return False
231 return True
232
233 elif match == MatchType.CONTAINS:
234 for a_value in attribute_values:
235 if value in a_value:
236 return True
237 return False
238
239 elif match == MatchType.NOT_CONTAINS:
240 for a_value in attribute_values:
241 if value in a_value:
242 return False
243 return True
244
245 elif match == MatchType.IS_SET:
246 return bool(attribute_values)
247
248 elif match == MatchType.NOT_SET:
249 return not attribute_values
250
[end of src/sentry/rules/conditions/event_attribute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/rules/conditions/event_attribute.py b/src/sentry/rules/conditions/event_attribute.py
--- a/src/sentry/rules/conditions/event_attribute.py
+++ b/src/sentry/rules/conditions/event_attribute.py
@@ -46,6 +46,7 @@
"user.ip_address",
"http.method",
"http.url",
+ "sdk.name",
"stacktrace.code",
"stacktrace.module",
"stacktrace.filename",
@@ -145,6 +146,11 @@
return [getattr(event.interfaces["request"], path[1])]
+ elif path[0] == "sdk":
+ if path[1] != "name":
+ return []
+ return [event.data["sdk"].get(path[1])]
+
elif path[0] == "stacktrace":
stacks = event.interfaces.get("stacktrace")
if stacks:
|
{"golden_diff": "diff --git a/src/sentry/rules/conditions/event_attribute.py b/src/sentry/rules/conditions/event_attribute.py\n--- a/src/sentry/rules/conditions/event_attribute.py\n+++ b/src/sentry/rules/conditions/event_attribute.py\n@@ -46,6 +46,7 @@\n \"user.ip_address\",\n \"http.method\",\n \"http.url\",\n+ \"sdk.name\",\n \"stacktrace.code\",\n \"stacktrace.module\",\n \"stacktrace.filename\",\n@@ -145,6 +146,11 @@\n \n return [getattr(event.interfaces[\"request\"], path[1])]\n \n+ elif path[0] == \"sdk\":\n+ if path[1] != \"name\":\n+ return []\n+ return [event.data[\"sdk\"].get(path[1])]\n+\n elif path[0] == \"stacktrace\":\n stacks = event.interfaces.get(\"stacktrace\")\n if stacks:\n", "issue": "Allow alerting based on `sdk.name`\n### Problem Statement\n\nWe're in a React Native project and we'd like the ability to have separate reporting rules based on the `sdk.name`. Right now we're looking very closely at any errors which are reported from `sentry.cocoa` and we would like to send them to a channel where the dedicated iOS engineers can look at them without being bothered by the issues that `sentry.javascript` generates (we have a separate team looking closely at any JS errors).\r\n\r\nWhen I went to go set up a reporting rule, I noticed that there's no way to filter based on `sdk.name`:\r\n\r\n\n\n### Solution Brainstorm\n\nHopefully it would be pretty easy to add this new option to the dropdown \ud83e\udd1e \n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import forms\n\nfrom sentry.rules.conditions.base import EventCondition\n\n\nclass MatchType:\n EQUAL = \"eq\"\n NOT_EQUAL = \"ne\"\n STARTS_WITH = \"sw\"\n NOT_STARTS_WITH = \"nsw\"\n ENDS_WITH = \"ew\"\n NOT_ENDS_WITH = \"new\"\n CONTAINS = \"co\"\n NOT_CONTAINS = \"nc\"\n IS_SET = \"is\"\n NOT_SET = \"ns\"\n\n\nMATCH_CHOICES = OrderedDict(\n [\n (MatchType.EQUAL, \"equals\"),\n (MatchType.NOT_EQUAL, \"does not equal\"),\n (MatchType.STARTS_WITH, \"starts with\"),\n (MatchType.NOT_STARTS_WITH, \"does not start with\"),\n (MatchType.ENDS_WITH, \"ends with\"),\n (MatchType.NOT_ENDS_WITH, \"does not end with\"),\n (MatchType.CONTAINS, \"contains\"),\n (MatchType.NOT_CONTAINS, \"does not contain\"),\n (MatchType.IS_SET, \"is set\"),\n (MatchType.NOT_SET, \"is not set\"),\n ]\n)\n\nATTR_CHOICES = [\n \"message\",\n \"platform\",\n \"environment\",\n \"type\",\n \"exception.type\",\n \"exception.value\",\n \"user.id\",\n \"user.email\",\n \"user.username\",\n \"user.ip_address\",\n \"http.method\",\n \"http.url\",\n \"stacktrace.code\",\n \"stacktrace.module\",\n \"stacktrace.filename\",\n \"stacktrace.abs_path\",\n \"stacktrace.package\",\n]\n\n\nclass EventAttributeForm(forms.Form):\n attribute = forms.ChoiceField(choices=[(a, a) for a in ATTR_CHOICES])\n match = forms.ChoiceField(choices=list(MATCH_CHOICES.items()))\n value = forms.CharField(widget=forms.TextInput(), required=False)\n\n\nclass EventAttributeCondition(EventCondition):\n \"\"\"\n Attributes are a mapping of <logical-key>.<property>.\n\n For example:\n\n - message\n - platform\n - exception.{type,value}\n - user.{id,ip_address,email,FIELD}\n - http.{method,url}\n - stacktrace.{code,module,filename,abs_path,package}\n - extra.{FIELD}\n \"\"\"\n\n # TODO(dcramer): add support for stacktrace.vars.[name]\n\n form_cls = EventAttributeForm\n label = \"The event's {attribute} value {match} {value}\"\n\n form_fields = {\n \"attribute\": {\n \"type\": \"choice\",\n \"placeholder\": \"i.e. 
exception.type\",\n \"choices\": [[a, a] for a in ATTR_CHOICES],\n },\n \"match\": {\"type\": \"choice\", \"choices\": list(MATCH_CHOICES.items())},\n \"value\": {\"type\": \"string\", \"placeholder\": \"value\"},\n }\n\n def _get_attribute_values(self, event, attr):\n # TODO(dcramer): we should validate attributes (when we can) before\n path = attr.split(\".\")\n\n if path[0] == \"platform\":\n if len(path) != 1:\n return []\n return [event.platform]\n\n if path[0] == \"message\":\n if len(path) != 1:\n return []\n return [event.message, event.search_message]\n elif path[0] == \"environment\":\n return [event.get_tag(\"environment\")]\n\n elif path[0] == \"type\":\n return [event.data[\"type\"]]\n\n elif len(path) == 1:\n return []\n\n elif path[0] == \"extra\":\n path.pop(0)\n value = event.data[\"extra\"]\n while path:\n bit = path.pop(0)\n value = value.get(bit)\n if not value:\n return []\n\n if isinstance(value, (list, tuple)):\n return value\n return [value]\n\n elif len(path) != 2:\n return []\n\n elif path[0] == \"exception\":\n if path[1] not in (\"type\", \"value\"):\n return []\n\n return [getattr(e, path[1]) for e in event.interfaces[\"exception\"].values]\n\n elif path[0] == \"user\":\n if path[1] in (\"id\", \"ip_address\", \"email\", \"username\"):\n return [getattr(event.interfaces[\"user\"], path[1])]\n return [getattr(event.interfaces[\"user\"].data, path[1])]\n\n elif path[0] == \"http\":\n if path[1] not in (\"url\", \"method\"):\n return []\n\n return [getattr(event.interfaces[\"request\"], path[1])]\n\n elif path[0] == \"stacktrace\":\n stacks = event.interfaces.get(\"stacktrace\")\n if stacks:\n stacks = [stacks]\n else:\n stacks = [\n e.stacktrace for e in event.interfaces[\"exception\"].values if e.stacktrace\n ]\n result = []\n for st in stacks:\n for frame in st.frames:\n if path[1] in (\"filename\", \"module\", \"abs_path\", \"package\"):\n result.append(getattr(frame, path[1]))\n elif path[1] == \"code\":\n if frame.pre_context:\n result.extend(frame.pre_context)\n if frame.context_line:\n result.append(frame.context_line)\n if frame.post_context:\n result.extend(frame.post_context)\n return result\n return []\n\n def render_label(self):\n data = {\n \"attribute\": self.data[\"attribute\"],\n \"value\": self.data[\"value\"],\n \"match\": MATCH_CHOICES[self.data[\"match\"]],\n }\n return self.label.format(**data)\n\n def passes(self, event, state, **kwargs):\n attr = self.get_option(\"attribute\")\n match = self.get_option(\"match\")\n value = self.get_option(\"value\")\n\n if not (attr and match and value):\n return False\n\n value = value.lower()\n attr = attr.lower()\n\n try:\n attribute_values = self._get_attribute_values(event, attr)\n except KeyError:\n attribute_values = []\n\n attribute_values = [str(v).lower() for v in attribute_values if v is not None]\n\n if match == MatchType.EQUAL:\n for a_value in attribute_values:\n if a_value == value:\n return True\n return False\n\n elif match == MatchType.NOT_EQUAL:\n for a_value in attribute_values:\n if a_value == value:\n return False\n return True\n\n elif match == MatchType.STARTS_WITH:\n for a_value in attribute_values:\n if a_value.startswith(value):\n return True\n return False\n\n elif match == MatchType.NOT_STARTS_WITH:\n for a_value in attribute_values:\n if a_value.startswith(value):\n return False\n return True\n\n elif match == MatchType.ENDS_WITH:\n for a_value in attribute_values:\n if a_value.endswith(value):\n return True\n return False\n\n elif match == MatchType.NOT_ENDS_WITH:\n for 
a_value in attribute_values:\n if a_value.endswith(value):\n return False\n return True\n\n elif match == MatchType.CONTAINS:\n for a_value in attribute_values:\n if value in a_value:\n return True\n return False\n\n elif match == MatchType.NOT_CONTAINS:\n for a_value in attribute_values:\n if value in a_value:\n return False\n return True\n\n elif match == MatchType.IS_SET:\n return bool(attribute_values)\n\n elif match == MatchType.NOT_SET:\n return not attribute_values\n", "path": "src/sentry/rules/conditions/event_attribute.py"}]}
| 3,046 | 196 |
gh_patches_debug_19101
|
rasdani/github-patches
|
git_diff
|
aio-libs-abandoned__aioredis-py-1075
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[2.0] Update setup.py Trove classifiers
The [classifiers](https://github.com/aio-libs/aioredis-py/blob/5a713fff3717094cca63e4a5f4b1cb7d6894a08f/setup.py#L25-L30) currently only list Python 3.6 and 3.7. This should be updated to include all versions that are tested. Or my personal preference is just to delete classifiers for minor versions, since probably no-one will remember to update them when new versions of Python are released.
It also indicates the status as 4 - Beta. That should probably change when we release the final 2.0.0.
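
A sketch of what the updated list could look like if per-minor-version classifiers are kept and the status is bumped for the final release (which versions to advertise is of course up to the maintainers):

```python
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Development Status :: 5 - Production/Stable",  # once 2.0.0 final is out
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3 :: Only",
    "Operating System :: POSIX",
    "Framework :: AsyncIO",
]
```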
</issue>
<code>
[start of setup.py]
1 import os.path
2 import re
3
4 from setuptools import find_packages, setup
5
6
7 def read(*parts):
8 with open(os.path.join(*parts)) as f:
9 return f.read().strip()
10
11
12 def read_version():
13 regexp = re.compile(r"^__version__\W*=\W*\"([\d.abrc]+)\"")
14 init_py = os.path.join(os.path.dirname(__file__), "aioredis", "__init__.py")
15 with open(init_py) as f:
16 for line in f:
17 match = regexp.match(line)
18 if match is not None:
19 return match.group(1)
20 raise RuntimeError(f"Cannot find version in {init_py}")
21
22
23 classifiers = [
24 "License :: OSI Approved :: MIT License",
25 "Development Status :: 4 - Beta",
26 "Programming Language :: Python",
27 "Programming Language :: Python :: 3",
28 "Programming Language :: Python :: 3.6",
29 "Programming Language :: Python :: 3.7",
30 "Programming Language :: Python :: 3 :: Only",
31 "Operating System :: POSIX",
32 "Environment :: Web Environment",
33 "Intended Audience :: Developers",
34 "Topic :: Software Development",
35 "Topic :: Software Development :: Libraries",
36 "Framework :: AsyncIO",
37 ]
38
39 setup(
40 name="aioredis",
41 version=read_version(),
42 description="asyncio (PEP 3156) Redis support",
43 long_description="\n\n".join((read("README.md"), read("CHANGELOG.md"))),
44 long_description_content_type="text/markdown",
45 classifiers=classifiers,
46 platforms=["POSIX"],
47 url="https://github.com/aio-libs/aioredis",
48 license="MIT",
49 packages=find_packages(exclude=["tests"]),
50 install_requires=[
51 "async-timeout",
52 "typing-extensions",
53 ],
54 extras_require={
55 "hiredis": 'hiredis>=1.0; implementation_name=="cpython"',
56 },
57 package_data={"aioredis": ["py.typed"]},
58 python_requires=">=3.6",
59 include_package_data=True,
60 )
61
[end of setup.py]
[start of aioredis/__init__.py]
1 from aioredis.client import Redis, StrictRedis
2 from aioredis.connection import (
3 BlockingConnectionPool,
4 Connection,
5 ConnectionPool,
6 SSLConnection,
7 UnixDomainSocketConnection,
8 )
9 from aioredis.exceptions import (
10 AuthenticationError,
11 AuthenticationWrongNumberOfArgsError,
12 BusyLoadingError,
13 ChildDeadlockedError,
14 ConnectionError,
15 DataError,
16 InvalidResponse,
17 PubSubError,
18 ReadOnlyError,
19 RedisError,
20 ResponseError,
21 TimeoutError,
22 WatchError,
23 )
24 from aioredis.utils import from_url
25
26
27 def int_or_str(value):
28 try:
29 return int(value)
30 except ValueError:
31 return value
32
33
34 __version__ = "2.0.0b1"
35 VERSION = tuple(map(int_or_str, __version__.split(".")))
36
37 __all__ = [
38 "AuthenticationError",
39 "AuthenticationWrongNumberOfArgsError",
40 "BlockingConnectionPool",
41 "BusyLoadingError",
42 "ChildDeadlockedError",
43 "Connection",
44 "ConnectionError",
45 "ConnectionPool",
46 "DataError",
47 "from_url",
48 "InvalidResponse",
49 "PubSubError",
50 "ReadOnlyError",
51 "Redis",
52 "RedisError",
53 "ResponseError",
54 "SSLConnection",
55 "StrictRedis",
56 "TimeoutError",
57 "UnixDomainSocketConnection",
58 "WatchError",
59 ]
60
[end of aioredis/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/aioredis/__init__.py b/aioredis/__init__.py
--- a/aioredis/__init__.py
+++ b/aioredis/__init__.py
@@ -31,7 +31,7 @@
return value
-__version__ = "2.0.0b1"
+__version__ = "2.0.0"
VERSION = tuple(map(int_or_str, __version__.split(".")))
__all__ = [
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,11 +22,13 @@
classifiers = [
"License :: OSI Approved :: MIT License",
- "Development Status :: 4 - Beta",
+ "Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: POSIX",
"Environment :: Web Environment",
|
{"golden_diff": "diff --git a/aioredis/__init__.py b/aioredis/__init__.py\n--- a/aioredis/__init__.py\n+++ b/aioredis/__init__.py\n@@ -31,7 +31,7 @@\n return value\n \n \n-__version__ = \"2.0.0b1\"\n+__version__ = \"2.0.0\"\n VERSION = tuple(map(int_or_str, __version__.split(\".\")))\n \n __all__ = [\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,11 +22,13 @@\n \n classifiers = [\n \"License :: OSI Approved :: MIT License\",\n- \"Development Status :: 4 - Beta\",\n+ \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n", "issue": "[2.0] Update setup.py Trove classifiers\nThe [classifiers](https://github.com/aio-libs/aioredis-py/blob/5a713fff3717094cca63e4a5f4b1cb7d6894a08f/setup.py#L25-L30) currently only list Python 3.6 and 3.7. This should be updated to include all versions that are tested. Or my personal preference is just to delete classifiers for minor versions, since probably no-one will remember to update them when new versions of Python are released.\r\n\r\nIt also indicates the status as 4 - Beta. That should probably change when we release the final 2.0.0.\n", "before_files": [{"content": "import os.path\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef read(*parts):\n with open(os.path.join(*parts)) as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*\\\"([\\d.abrc]+)\\\"\")\n init_py = os.path.join(os.path.dirname(__file__), \"aioredis\", \"__init__.py\")\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError(f\"Cannot find version in {init_py}\")\n\n\nclassifiers = [\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Framework :: AsyncIO\",\n]\n\nsetup(\n name=\"aioredis\",\n version=read_version(),\n description=\"asyncio (PEP 3156) Redis support\",\n long_description=\"\\n\\n\".join((read(\"README.md\"), read(\"CHANGELOG.md\"))),\n long_description_content_type=\"text/markdown\",\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=[\n \"async-timeout\",\n \"typing-extensions\",\n ],\n extras_require={\n \"hiredis\": 'hiredis>=1.0; implementation_name==\"cpython\"',\n },\n package_data={\"aioredis\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n include_package_data=True,\n)\n", "path": "setup.py"}, {"content": "from aioredis.client import Redis, StrictRedis\nfrom aioredis.connection import (\n BlockingConnectionPool,\n Connection,\n ConnectionPool,\n SSLConnection,\n UnixDomainSocketConnection,\n)\nfrom aioredis.exceptions import (\n 
AuthenticationError,\n AuthenticationWrongNumberOfArgsError,\n BusyLoadingError,\n ChildDeadlockedError,\n ConnectionError,\n DataError,\n InvalidResponse,\n PubSubError,\n ReadOnlyError,\n RedisError,\n ResponseError,\n TimeoutError,\n WatchError,\n)\nfrom aioredis.utils import from_url\n\n\ndef int_or_str(value):\n try:\n return int(value)\n except ValueError:\n return value\n\n\n__version__ = \"2.0.0b1\"\nVERSION = tuple(map(int_or_str, __version__.split(\".\")))\n\n__all__ = [\n \"AuthenticationError\",\n \"AuthenticationWrongNumberOfArgsError\",\n \"BlockingConnectionPool\",\n \"BusyLoadingError\",\n \"ChildDeadlockedError\",\n \"Connection\",\n \"ConnectionError\",\n \"ConnectionPool\",\n \"DataError\",\n \"from_url\",\n \"InvalidResponse\",\n \"PubSubError\",\n \"ReadOnlyError\",\n \"Redis\",\n \"RedisError\",\n \"ResponseError\",\n \"SSLConnection\",\n \"StrictRedis\",\n \"TimeoutError\",\n \"UnixDomainSocketConnection\",\n \"WatchError\",\n]\n", "path": "aioredis/__init__.py"}]}
| 1,674 | 272 |
gh_patches_debug_24332
|
rasdani/github-patches
|
git_diff
|
automl__auto-sklearn-1662
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Question] restriction of y_max
# Short Question Description
A clear single sentence question we can try to help with?
In the [predict](https://github.com/automl/auto-sklearn/blob/63bfbebbd288c8669d6bce7f44f8c9a3a82facd5/autosklearn/pipeline/regression.py#L108)() of Class SimpleRegressionPipeline, there are restrictions on the min/max values of y.
```
def predict(self, X, batch_size=None):
y = super().predict(X, batch_size=batch_size)
y[y > (2 * self.y_max_)] = 2 * self.y_max_
if self.y_min_ < 0:
y[y < (2 * self.y_min_)] = 2 * self.y_min_
elif self.y_min_ > 0:
y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_
return y
```
My question is should we also consider y_max < 0 like y_min?
```
def predict(self, X, batch_size=None):
y = super().predict(X, batch_size=batch_size)
if self.y_max > 0:
y[y > (2 * self.y_max_)] = 2 * self.y_max_
elif self.y_max < 0:
y[y > (0.5 * self.y_max_)] = 0.5 * self.y_max_
if self.y_min_ < 0:
y[y < (2 * self.y_min_)] = 2 * self.y_min_
elif self.y_min_ > 0:
y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_
return y
```
If I have missed anything, please let me know.
Many thanks!
</issue>
<code>
[start of autosklearn/pipeline/regression.py]
1 from typing import Optional, Union
2
3 import copy
4 from itertools import product
5
6 import numpy as np
7 from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
8 from ConfigSpace.forbidden import ForbiddenAndConjunction, ForbiddenEqualsClause
9 from sklearn.base import RegressorMixin
10
11 from autosklearn.askl_typing import FEAT_TYPE_TYPE
12 from autosklearn.pipeline.base import BasePipeline
13 from autosklearn.pipeline.components import (
14 feature_preprocessing as feature_preprocessing_components,
15 )
16 from autosklearn.pipeline.components import regression as regression_components
17 from autosklearn.pipeline.components.data_preprocessing import DataPreprocessorChoice
18 from autosklearn.pipeline.constants import SPARSE
19
20
21 class SimpleRegressionPipeline(RegressorMixin, BasePipeline):
22 """This class implements the regression task.
23
24 It implements a pipeline, which includes one preprocessing step and one
25 regression algorithm. It can render a search space including all known
26 regression and preprocessing algorithms.
27
28 Contrary to the sklearn API it is not possible to enumerate the
29 possible parameters in the __init__ function because we only know the
30 available regressors at runtime. For this reason the user must
31 specifiy the parameters by passing an instance of
32 ConfigSpace.configuration_space.Configuration.
33
34 Parameters
35 ----------
36 config : ConfigSpace.configuration_space.Configuration
37 The configuration to evaluate.
38
39 random_state : Optional[int | RandomState]
40 If int, random_state is the seed used by the random number generator;
41 If RandomState instance, random_state is the random number generator;
42 If None, the random number generator is the RandomState instance
43 used by `np.random`.
44
45 Attributes
46 ----------
47 _estimator : The underlying scikit-learn regression model. This
48 variable is assigned after a call to the
49 :meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`
50 method.
51
52 _preprocessor : The underlying scikit-learn preprocessing algorithm. This
53 variable is only assigned if a preprocessor is specified and
54 after a call to the
55 :meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`
56 method.
57
58 See also
59 --------
60
61 References
62 ----------
63
64 Examples
65 --------
66
67 """
68
69 def __init__(
70 self,
71 config: Optional[Configuration] = None,
72 feat_type: Optional[FEAT_TYPE_TYPE] = None,
73 steps=None,
74 dataset_properties=None,
75 include=None,
76 exclude=None,
77 random_state: Optional[Union[int, np.random.RandomState]] = None,
78 init_params=None,
79 ):
80 self._output_dtype = np.float32
81 if dataset_properties is None:
82 dataset_properties = dict()
83 if "target_type" not in dataset_properties:
84 dataset_properties["target_type"] = "regression"
85 super().__init__(
86 feat_type=feat_type,
87 config=config,
88 steps=steps,
89 dataset_properties=dataset_properties,
90 include=include,
91 exclude=exclude,
92 random_state=random_state,
93 init_params=init_params,
94 )
95
96 def fit_estimator(self, X, y, **fit_params):
97 self.y_max_ = np.nanmax(y)
98 self.y_min_ = np.nanmin(y)
99 return super(SimpleRegressionPipeline, self).fit_estimator(X, y, **fit_params)
100
101 def iterative_fit(self, X, y, n_iter=1, **fit_params):
102 self.y_max_ = np.nanmax(y)
103 self.y_min_ = np.nanmin(y)
104 return super(SimpleRegressionPipeline, self).iterative_fit(
105 X, y, n_iter=n_iter, **fit_params
106 )
107
108 def predict(self, X, batch_size=None):
109 y = super().predict(X, batch_size=batch_size)
110 y[y > (2 * self.y_max_)] = 2 * self.y_max_
111 if self.y_min_ < 0:
112 y[y < (2 * self.y_min_)] = 2 * self.y_min_
113 elif self.y_min_ > 0:
114 y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_
115 return y
116
117 def _get_hyperparameter_search_space(
118 self,
119 feat_type: Optional[FEAT_TYPE_TYPE] = None,
120 include=None,
121 exclude=None,
122 dataset_properties=None,
123 ):
124 """Return the configuration space for the CASH problem.
125
126 Parameters
127 ----------
128 include : dict
129 If include is given, only the modules specified for nodes
130 are used. Specify them by their module name; e.g., to include
131 only the SVM use :python:`include={'regressor':['svr']}`.
132
133 exclude : dict
134 If exclude is given, only the components specified for nodes
135 are used. Specify them by their module name; e.g., to include
136 all regressors except the SVM use
137 :python:`exclude=['regressor': 'svr']`.
138
139 Returns
140 -------
141 cs : ConfigSpace.configuration_space.Configuration
142 The configuration space describing the SimpleRegressionClassifier.
143 """
144 cs = ConfigurationSpace()
145
146 if dataset_properties is None or not isinstance(dataset_properties, dict):
147 dataset_properties = dict()
148 if "target_type" not in dataset_properties:
149 dataset_properties["target_type"] = "regression"
150 if dataset_properties["target_type"] != "regression":
151 dataset_properties["target_type"] = "regression"
152
153 if "sparse" not in dataset_properties:
154 # This dataset is probably dense
155 dataset_properties["sparse"] = False
156
157 cs = self._get_base_search_space(
158 cs=cs,
159 feat_type=feat_type,
160 dataset_properties=dataset_properties,
161 exclude=exclude,
162 include=include,
163 pipeline=self.steps,
164 )
165
166 regressors = cs.get_hyperparameter("regressor:__choice__").choices
167 preprocessors = cs.get_hyperparameter("feature_preprocessor:__choice__").choices
168 available_regressors = self._final_estimator.get_available_components(
169 dataset_properties
170 )
171
172 possible_default_regressor = copy.copy(list(available_regressors.keys()))
173 default = cs.get_hyperparameter("regressor:__choice__").default_value
174 del possible_default_regressor[possible_default_regressor.index(default)]
175
176 # A regressor which can handle sparse data after the densifier is
177 # forbidden for memory issues
178 for key in regressors:
179 if (
180 SPARSE
181 in available_regressors[key].get_properties(dataset_properties=None)[
182 "input"
183 ]
184 ):
185 if "densifier" in preprocessors:
186 while True:
187 try:
188 forb_reg = ForbiddenEqualsClause(
189 cs.get_hyperparameter("regressor:__choice__"), key
190 )
191 forb_fpp = ForbiddenEqualsClause(
192 cs.get_hyperparameter(
193 "feature_preprocessor:__choice__"
194 ),
195 "densifier",
196 )
197 cs.add_forbidden_clause(
198 ForbiddenAndConjunction(forb_reg, forb_fpp)
199 )
200 # Success
201 break
202 except ValueError:
203 # Change the default and try again
204 try:
205 default = possible_default_regressor.pop()
206 except IndexError:
207 raise ValueError(
208 "Cannot find a legal default configuration."
209 )
210 cs.get_hyperparameter(
211 "regressor:__choice__"
212 ).default_value = default
213
214 # which would take too long
215 # Combinations of tree-based models with feature learning:
216 regressors_ = [
217 "adaboost",
218 "ard_regression",
219 "decision_tree",
220 "extra_trees",
221 "gaussian_process",
222 "gradient_boosting",
223 "k_nearest_neighbors",
224 "libsvm_svr",
225 "mlp",
226 "random_forest",
227 ]
228 feature_learning_ = ["kitchen_sinks", "kernel_pca", "nystroem_sampler"]
229
230 for r, f in product(regressors_, feature_learning_):
231 if r not in regressors:
232 continue
233 if f not in preprocessors:
234 continue
235 while True:
236 try:
237 cs.add_forbidden_clause(
238 ForbiddenAndConjunction(
239 ForbiddenEqualsClause(
240 cs.get_hyperparameter("regressor:__choice__"), r
241 ),
242 ForbiddenEqualsClause(
243 cs.get_hyperparameter(
244 "feature_preprocessor:__choice__"
245 ),
246 f,
247 ),
248 )
249 )
250 break
251 except KeyError:
252 break
253 except ValueError:
254 # Change the default and try again
255 try:
256 default = possible_default_regressor.pop()
257 except IndexError:
258 raise ValueError("Cannot find a legal default configuration.")
259 cs.get_hyperparameter(
260 "regressor:__choice__"
261 ).default_value = default
262
263 self.configuration_space = cs
264 self.dataset_properties = dataset_properties
265 return cs
266
267 def _get_estimator_components(self):
268 return regression_components._regressors
269
270 def _get_pipeline_steps(
271 self, dataset_properties, feat_type: Optional[FEAT_TYPE_TYPE] = None
272 ):
273 steps = []
274
275 default_dataset_properties = {"target_type": "regression"}
276 if dataset_properties is not None and isinstance(dataset_properties, dict):
277 default_dataset_properties.update(dataset_properties)
278
279 steps.extend(
280 [
281 [
282 "data_preprocessor",
283 DataPreprocessorChoice(
284 feat_type=feat_type,
285 dataset_properties=default_dataset_properties,
286 random_state=self.random_state,
287 ),
288 ],
289 [
290 "feature_preprocessor",
291 feature_preprocessing_components.FeaturePreprocessorChoice(
292 feat_type=feat_type,
293 dataset_properties=default_dataset_properties,
294 random_state=self.random_state,
295 ),
296 ],
297 [
298 "regressor",
299 regression_components.RegressorChoice(
300 feat_type=feat_type,
301 dataset_properties=default_dataset_properties,
302 random_state=self.random_state,
303 ),
304 ],
305 ]
306 )
307
308 return steps
309
310 def _get_estimator_hyperparameter_name(self):
311 return "regressor"
312
[end of autosklearn/pipeline/regression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/autosklearn/pipeline/regression.py b/autosklearn/pipeline/regression.py
--- a/autosklearn/pipeline/regression.py
+++ b/autosklearn/pipeline/regression.py
@@ -106,12 +106,35 @@
)
def predict(self, X, batch_size=None):
+ """Predict the classes using the selected model.
+
+ Predicted values are capped to approximately the maximum and minimum labels
+ seen during training.
+
+ Parameters
+ ----------
+ X : array-like, shape = (n_samples, n_features)
+
+ batch_size: int or None, defaults to None
+ batch_size controls whether the pipeline will be
+ called on small chunks of the data. Useful when calling the
+ predict method on the whole array X results in a MemoryError.
+
+ Returns
+ -------
+ array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
+ Returns the predicted values"""
y = super().predict(X, batch_size=batch_size)
- y[y > (2 * self.y_max_)] = 2 * self.y_max_
+
+ if self.y_max_ > 0:
+ y[y > (2 * self.y_max_)] = 2 * self.y_max_
+ elif self.y_max_ < 0:
+ y[y > (0.5 * self.y_max_)] = 0.5 * self.y_max_
if self.y_min_ < 0:
y[y < (2 * self.y_min_)] = 2 * self.y_min_
elif self.y_min_ > 0:
y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_
+
return y
def _get_hyperparameter_search_space(
|
{"golden_diff": "diff --git a/autosklearn/pipeline/regression.py b/autosklearn/pipeline/regression.py\n--- a/autosklearn/pipeline/regression.py\n+++ b/autosklearn/pipeline/regression.py\n@@ -106,12 +106,35 @@\n )\n \n def predict(self, X, batch_size=None):\n+ \"\"\"Predict the classes using the selected model.\n+\n+ Predicted values are capped to approximately the maximum and minimum labels\n+ seen during training.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape = (n_samples, n_features)\n+\n+ batch_size: int or None, defaults to None\n+ batch_size controls whether the pipeline will be\n+ called on small chunks of the data. Useful when calling the\n+ predict method on the whole array X results in a MemoryError.\n+\n+ Returns\n+ -------\n+ array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)\n+ Returns the predicted values\"\"\"\n y = super().predict(X, batch_size=batch_size)\n- y[y > (2 * self.y_max_)] = 2 * self.y_max_\n+\n+ if self.y_max_ > 0:\n+ y[y > (2 * self.y_max_)] = 2 * self.y_max_\n+ elif self.y_max_ < 0:\n+ y[y > (0.5 * self.y_max_)] = 0.5 * self.y_max_\n if self.y_min_ < 0:\n y[y < (2 * self.y_min_)] = 2 * self.y_min_\n elif self.y_min_ > 0:\n y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_\n+\n return y\n \n def _get_hyperparameter_search_space(\n", "issue": "[Question] restriction of y_max\n# Short Question Description\r\nA clear single sentence question we can try to help with?\r\n\r\n\r\nIn the [predict](https://github.com/automl/auto-sklearn/blob/63bfbebbd288c8669d6bce7f44f8c9a3a82facd5/autosklearn/pipeline/regression.py#L108)() of Class SimpleRegressionPipeline, there are restrictions on the min/max values of y. \r\n```\r\ndef predict(self, X, batch_size=None):\r\n y = super().predict(X, batch_size=batch_size)\r\n y[y > (2 * self.y_max_)] = 2 * self.y_max_\r\n if self.y_min_ < 0:\r\n y[y < (2 * self.y_min_)] = 2 * self.y_min_\r\n elif self.y_min_ > 0:\r\n y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_\r\n return y\r\n```\r\n\r\nMy question is should we also consider y_max < 0 like y_min?\r\n\r\n```\r\ndef predict(self, X, batch_size=None):\r\n y = super().predict(X, batch_size=batch_size)\r\n if self.y_max > 0:\r\n y[y > (2 * self.y_max_)] = 2 * self.y_max_\r\n elif self.y_max < 0:\r\n y[y > (0.5 * self.y_max_)] = 0.5 * self.y_max_\r\n if self.y_min_ < 0:\r\n y[y < (2 * self.y_min_)] = 2 * self.y_min_\r\n elif self.y_min_ > 0:\r\n y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_\r\n return y\r\n```\r\n\r\nIf I have missed anything, please let me know. 
\r\nMany thanks!\r\n\r\n\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport copy\nfrom itertools import product\n\nimport numpy as np\nfrom ConfigSpace.configuration_space import Configuration, ConfigurationSpace\nfrom ConfigSpace.forbidden import ForbiddenAndConjunction, ForbiddenEqualsClause\nfrom sklearn.base import RegressorMixin\n\nfrom autosklearn.askl_typing import FEAT_TYPE_TYPE\nfrom autosklearn.pipeline.base import BasePipeline\nfrom autosklearn.pipeline.components import (\n feature_preprocessing as feature_preprocessing_components,\n)\nfrom autosklearn.pipeline.components import regression as regression_components\nfrom autosklearn.pipeline.components.data_preprocessing import DataPreprocessorChoice\nfrom autosklearn.pipeline.constants import SPARSE\n\n\nclass SimpleRegressionPipeline(RegressorMixin, BasePipeline):\n \"\"\"This class implements the regression task.\n\n It implements a pipeline, which includes one preprocessing step and one\n regression algorithm. It can render a search space including all known\n regression and preprocessing algorithms.\n\n Contrary to the sklearn API it is not possible to enumerate the\n possible parameters in the __init__ function because we only know the\n available regressors at runtime. For this reason the user must\n specifiy the parameters by passing an instance of\n ConfigSpace.configuration_space.Configuration.\n\n Parameters\n ----------\n config : ConfigSpace.configuration_space.Configuration\n The configuration to evaluate.\n\n random_state : Optional[int | RandomState]\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance\n used by `np.random`.\n\n Attributes\n ----------\n _estimator : The underlying scikit-learn regression model. This\n variable is assigned after a call to the\n :meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`\n method.\n\n _preprocessor : The underlying scikit-learn preprocessing algorithm. 
This\n variable is only assigned if a preprocessor is specified and\n after a call to the\n :meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`\n method.\n\n See also\n --------\n\n References\n ----------\n\n Examples\n --------\n\n \"\"\"\n\n def __init__(\n self,\n config: Optional[Configuration] = None,\n feat_type: Optional[FEAT_TYPE_TYPE] = None,\n steps=None,\n dataset_properties=None,\n include=None,\n exclude=None,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n init_params=None,\n ):\n self._output_dtype = np.float32\n if dataset_properties is None:\n dataset_properties = dict()\n if \"target_type\" not in dataset_properties:\n dataset_properties[\"target_type\"] = \"regression\"\n super().__init__(\n feat_type=feat_type,\n config=config,\n steps=steps,\n dataset_properties=dataset_properties,\n include=include,\n exclude=exclude,\n random_state=random_state,\n init_params=init_params,\n )\n\n def fit_estimator(self, X, y, **fit_params):\n self.y_max_ = np.nanmax(y)\n self.y_min_ = np.nanmin(y)\n return super(SimpleRegressionPipeline, self).fit_estimator(X, y, **fit_params)\n\n def iterative_fit(self, X, y, n_iter=1, **fit_params):\n self.y_max_ = np.nanmax(y)\n self.y_min_ = np.nanmin(y)\n return super(SimpleRegressionPipeline, self).iterative_fit(\n X, y, n_iter=n_iter, **fit_params\n )\n\n def predict(self, X, batch_size=None):\n y = super().predict(X, batch_size=batch_size)\n y[y > (2 * self.y_max_)] = 2 * self.y_max_\n if self.y_min_ < 0:\n y[y < (2 * self.y_min_)] = 2 * self.y_min_\n elif self.y_min_ > 0:\n y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_\n return y\n\n def _get_hyperparameter_search_space(\n self,\n feat_type: Optional[FEAT_TYPE_TYPE] = None,\n include=None,\n exclude=None,\n dataset_properties=None,\n ):\n \"\"\"Return the configuration space for the CASH problem.\n\n Parameters\n ----------\n include : dict\n If include is given, only the modules specified for nodes\n are used. Specify them by their module name; e.g., to include\n only the SVM use :python:`include={'regressor':['svr']}`.\n\n exclude : dict\n If exclude is given, only the components specified for nodes\n are used. 
Specify them by their module name; e.g., to include\n all regressors except the SVM use\n :python:`exclude=['regressor': 'svr']`.\n\n Returns\n -------\n cs : ConfigSpace.configuration_space.Configuration\n The configuration space describing the SimpleRegressionClassifier.\n \"\"\"\n cs = ConfigurationSpace()\n\n if dataset_properties is None or not isinstance(dataset_properties, dict):\n dataset_properties = dict()\n if \"target_type\" not in dataset_properties:\n dataset_properties[\"target_type\"] = \"regression\"\n if dataset_properties[\"target_type\"] != \"regression\":\n dataset_properties[\"target_type\"] = \"regression\"\n\n if \"sparse\" not in dataset_properties:\n # This dataset is probably dense\n dataset_properties[\"sparse\"] = False\n\n cs = self._get_base_search_space(\n cs=cs,\n feat_type=feat_type,\n dataset_properties=dataset_properties,\n exclude=exclude,\n include=include,\n pipeline=self.steps,\n )\n\n regressors = cs.get_hyperparameter(\"regressor:__choice__\").choices\n preprocessors = cs.get_hyperparameter(\"feature_preprocessor:__choice__\").choices\n available_regressors = self._final_estimator.get_available_components(\n dataset_properties\n )\n\n possible_default_regressor = copy.copy(list(available_regressors.keys()))\n default = cs.get_hyperparameter(\"regressor:__choice__\").default_value\n del possible_default_regressor[possible_default_regressor.index(default)]\n\n # A regressor which can handle sparse data after the densifier is\n # forbidden for memory issues\n for key in regressors:\n if (\n SPARSE\n in available_regressors[key].get_properties(dataset_properties=None)[\n \"input\"\n ]\n ):\n if \"densifier\" in preprocessors:\n while True:\n try:\n forb_reg = ForbiddenEqualsClause(\n cs.get_hyperparameter(\"regressor:__choice__\"), key\n )\n forb_fpp = ForbiddenEqualsClause(\n cs.get_hyperparameter(\n \"feature_preprocessor:__choice__\"\n ),\n \"densifier\",\n )\n cs.add_forbidden_clause(\n ForbiddenAndConjunction(forb_reg, forb_fpp)\n )\n # Success\n break\n except ValueError:\n # Change the default and try again\n try:\n default = possible_default_regressor.pop()\n except IndexError:\n raise ValueError(\n \"Cannot find a legal default configuration.\"\n )\n cs.get_hyperparameter(\n \"regressor:__choice__\"\n ).default_value = default\n\n # which would take too long\n # Combinations of tree-based models with feature learning:\n regressors_ = [\n \"adaboost\",\n \"ard_regression\",\n \"decision_tree\",\n \"extra_trees\",\n \"gaussian_process\",\n \"gradient_boosting\",\n \"k_nearest_neighbors\",\n \"libsvm_svr\",\n \"mlp\",\n \"random_forest\",\n ]\n feature_learning_ = [\"kitchen_sinks\", \"kernel_pca\", \"nystroem_sampler\"]\n\n for r, f in product(regressors_, feature_learning_):\n if r not in regressors:\n continue\n if f not in preprocessors:\n continue\n while True:\n try:\n cs.add_forbidden_clause(\n ForbiddenAndConjunction(\n ForbiddenEqualsClause(\n cs.get_hyperparameter(\"regressor:__choice__\"), r\n ),\n ForbiddenEqualsClause(\n cs.get_hyperparameter(\n \"feature_preprocessor:__choice__\"\n ),\n f,\n ),\n )\n )\n break\n except KeyError:\n break\n except ValueError:\n # Change the default and try again\n try:\n default = possible_default_regressor.pop()\n except IndexError:\n raise ValueError(\"Cannot find a legal default configuration.\")\n cs.get_hyperparameter(\n \"regressor:__choice__\"\n ).default_value = default\n\n self.configuration_space = cs\n self.dataset_properties = dataset_properties\n return cs\n\n def 
_get_estimator_components(self):\n return regression_components._regressors\n\n def _get_pipeline_steps(\n self, dataset_properties, feat_type: Optional[FEAT_TYPE_TYPE] = None\n ):\n steps = []\n\n default_dataset_properties = {\"target_type\": \"regression\"}\n if dataset_properties is not None and isinstance(dataset_properties, dict):\n default_dataset_properties.update(dataset_properties)\n\n steps.extend(\n [\n [\n \"data_preprocessor\",\n DataPreprocessorChoice(\n feat_type=feat_type,\n dataset_properties=default_dataset_properties,\n random_state=self.random_state,\n ),\n ],\n [\n \"feature_preprocessor\",\n feature_preprocessing_components.FeaturePreprocessorChoice(\n feat_type=feat_type,\n dataset_properties=default_dataset_properties,\n random_state=self.random_state,\n ),\n ],\n [\n \"regressor\",\n regression_components.RegressorChoice(\n feat_type=feat_type,\n dataset_properties=default_dataset_properties,\n random_state=self.random_state,\n ),\n ],\n ]\n )\n\n return steps\n\n def _get_estimator_hyperparameter_name(self):\n return \"regressor\"\n", "path": "autosklearn/pipeline/regression.py"}]}
| 3,924 | 409 |
gh_patches_debug_8811
|
rasdani/github-patches
|
git_diff
|
psf__black-2816
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test for `A᧚ = 0`
Black v19.10b0 fails to parse certain assignments involving unicode identifiers - [playground link here](https://black.now.sh/?version=stable&state=_Td6WFoAAATm1rRGAgAhARYAAAB0L-Wj4ABLADtdAD2IimZxl1N_WjMy7A7oUimP5kl9tNitfjGTgMhZej2xgomiikPHniF7YMrqeF7JYab2JGKtxYQLJtMAAACEQNE3-XEpLQABV0wDcxaqH7bzfQEAAAAABFla).
```python
A᧚
A፩
```
This code is in fact valid Python, as you can confirm by pasting it into a repl or with `compile("A\u19da = 0")`.
Found, as with most of my bugs, via [Hypothesmith](https://github.com/Zac-HD/hypothesmith). Given that this applies to multiple unicode digit characters, it might be due to unicode digits in identifiers?
</issue>
<code>
[start of fuzz.py]
1 """Property-based tests for Black.
2
3 By Zac Hatfield-Dodds, based on my Hypothesmith tool for source code
4 generation. You can run this file with `python`, `pytest`, or (soon)
5 a coverage-guided fuzzer I'm working on.
6 """
7
8 import re
9
10 import hypothesmith
11 from hypothesis import HealthCheck, given, settings, strategies as st
12
13 import black
14 from blib2to3.pgen2.tokenize import TokenError
15
16
17 # This test uses the Hypothesis and Hypothesmith libraries to generate random
18 # syntatically-valid Python source code and run Black in odd modes.
19 @settings(
20 max_examples=1000, # roughly 1k tests/minute, or half that under coverage
21 derandomize=True, # deterministic mode to avoid CI flakiness
22 deadline=None, # ignore Hypothesis' health checks; we already know that
23 suppress_health_check=HealthCheck.all(), # this is slow and filter-heavy.
24 )
25 @given(
26 # Note that while Hypothesmith might generate code unlike that written by
27 # humans, it's a general test that should pass for any *valid* source code.
28 # (so e.g. running it against code scraped of the internet might also help)
29 src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),
30 # Using randomly-varied modes helps us to exercise less common code paths.
31 mode=st.builds(
32 black.FileMode,
33 line_length=st.just(88) | st.integers(0, 200),
34 string_normalization=st.booleans(),
35 preview=st.booleans(),
36 is_pyi=st.booleans(),
37 magic_trailing_comma=st.booleans(),
38 ),
39 )
40 def test_idempotent_any_syntatically_valid_python(
41 src_contents: str, mode: black.FileMode
42 ) -> None:
43 # Before starting, let's confirm that the input string is valid Python:
44 compile(src_contents, "<string>", "exec") # else the bug is in hypothesmith
45
46 # Then format the code...
47 try:
48 dst_contents = black.format_str(src_contents, mode=mode)
49 except black.InvalidInput:
50 # This is a bug - if it's valid Python code, as above, Black should be
51 # able to cope with it. See issues #970, #1012, #1358, and #1557.
52 # TODO: remove this try-except block when issues are resolved.
53 return
54 except TokenError as e:
55 if ( # Special-case logic for backslashes followed by newlines or end-of-input
56 e.args[0] == "EOF in multi-line statement"
57 and re.search(r"\\($|\r?\n)", src_contents) is not None
58 ):
59 # This is a bug - if it's valid Python code, as above, Black should be
60 # able to cope with it. See issue #1012.
61 # TODO: remove this block when the issue is resolved.
62 return
63 raise
64
65 # And check that we got equivalent and stable output.
66 black.assert_equivalent(src_contents, dst_contents)
67 black.assert_stable(src_contents, dst_contents, mode=mode)
68
69 # Future test: check that pure-python and mypyc versions of black
70 # give identical output for identical input?
71
72
73 if __name__ == "__main__":
74 # Run tests, including shrinking and reporting any known failures.
75 test_idempotent_any_syntatically_valid_python()
76
77 # If Atheris is available, run coverage-guided fuzzing.
78 # (if you want only bounded fuzzing, just use `pytest fuzz.py`)
79 try:
80 import sys
81 import atheris
82 except ImportError:
83 pass
84 else:
85 test = test_idempotent_any_syntatically_valid_python
86 atheris.Setup(sys.argv, test.hypothesis.fuzz_one_input)
87 atheris.Fuzz()
88
[end of fuzz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fuzz.py b/fuzz.py
--- a/fuzz.py
+++ b/fuzz.py
@@ -48,7 +48,7 @@
dst_contents = black.format_str(src_contents, mode=mode)
except black.InvalidInput:
# This is a bug - if it's valid Python code, as above, Black should be
- # able to cope with it. See issues #970, #1012, #1358, and #1557.
+ # able to cope with it. See issues #970, #1012
# TODO: remove this try-except block when issues are resolved.
return
except TokenError as e:
|
{"golden_diff": "diff --git a/fuzz.py b/fuzz.py\n--- a/fuzz.py\n+++ b/fuzz.py\n@@ -48,7 +48,7 @@\n dst_contents = black.format_str(src_contents, mode=mode)\n except black.InvalidInput:\n # This is a bug - if it's valid Python code, as above, Black should be\n- # able to cope with it. See issues #970, #1012, #1358, and #1557.\n+ # able to cope with it. See issues #970, #1012\n # TODO: remove this try-except block when issues are resolved.\n return\n except TokenError as e:\n", "issue": "Add test for `A\u19da = 0`\nBlack v19.10b0 fails to parse certain assignments involving unicode identifiers - [playground link here](https://black.now.sh/?version=stable&state=_Td6WFoAAATm1rRGAgAhARYAAAB0L-Wj4ABLADtdAD2IimZxl1N_WjMy7A7oUimP5kl9tNitfjGTgMhZej2xgomiikPHniF7YMrqeF7JYab2JGKtxYQLJtMAAACEQNE3-XEpLQABV0wDcxaqH7bzfQEAAAAABFla).\r\n\r\n```python\r\nA\u19da\r\nA\u1369\r\n```\r\n\r\nThis code is in fact valid Python, as you can confirm by pasting it into a repl or with `compile(\"A\\u19da = 0\")`.\r\n\r\nFound, as with most of my bugs, via [Hypothesmith](https://github.com/Zac-HD/hypothesmith). Given that this applies to multiple unicode digit characters, it might be due to unicode digits in identifiers?\n", "before_files": [{"content": "\"\"\"Property-based tests for Black.\n\nBy Zac Hatfield-Dodds, based on my Hypothesmith tool for source code\ngeneration. You can run this file with `python`, `pytest`, or (soon)\na coverage-guided fuzzer I'm working on.\n\"\"\"\n\nimport re\n\nimport hypothesmith\nfrom hypothesis import HealthCheck, given, settings, strategies as st\n\nimport black\nfrom blib2to3.pgen2.tokenize import TokenError\n\n\n# This test uses the Hypothesis and Hypothesmith libraries to generate random\n# syntatically-valid Python source code and run Black in odd modes.\n@settings(\n max_examples=1000, # roughly 1k tests/minute, or half that under coverage\n derandomize=True, # deterministic mode to avoid CI flakiness\n deadline=None, # ignore Hypothesis' health checks; we already know that\n suppress_health_check=HealthCheck.all(), # this is slow and filter-heavy.\n)\n@given(\n # Note that while Hypothesmith might generate code unlike that written by\n # humans, it's a general test that should pass for any *valid* source code.\n # (so e.g. running it against code scraped of the internet might also help)\n src_contents=hypothesmith.from_grammar() | hypothesmith.from_node(),\n # Using randomly-varied modes helps us to exercise less common code paths.\n mode=st.builds(\n black.FileMode,\n line_length=st.just(88) | st.integers(0, 200),\n string_normalization=st.booleans(),\n preview=st.booleans(),\n is_pyi=st.booleans(),\n magic_trailing_comma=st.booleans(),\n ),\n)\ndef test_idempotent_any_syntatically_valid_python(\n src_contents: str, mode: black.FileMode\n) -> None:\n # Before starting, let's confirm that the input string is valid Python:\n compile(src_contents, \"<string>\", \"exec\") # else the bug is in hypothesmith\n\n # Then format the code...\n try:\n dst_contents = black.format_str(src_contents, mode=mode)\n except black.InvalidInput:\n # This is a bug - if it's valid Python code, as above, Black should be\n # able to cope with it. 
See issues #970, #1012, #1358, and #1557.\n # TODO: remove this try-except block when issues are resolved.\n return\n except TokenError as e:\n if ( # Special-case logic for backslashes followed by newlines or end-of-input\n e.args[0] == \"EOF in multi-line statement\"\n and re.search(r\"\\\\($|\\r?\\n)\", src_contents) is not None\n ):\n # This is a bug - if it's valid Python code, as above, Black should be\n # able to cope with it. See issue #1012.\n # TODO: remove this block when the issue is resolved.\n return\n raise\n\n # And check that we got equivalent and stable output.\n black.assert_equivalent(src_contents, dst_contents)\n black.assert_stable(src_contents, dst_contents, mode=mode)\n\n # Future test: check that pure-python and mypyc versions of black\n # give identical output for identical input?\n\n\nif __name__ == \"__main__\":\n # Run tests, including shrinking and reporting any known failures.\n test_idempotent_any_syntatically_valid_python()\n\n # If Atheris is available, run coverage-guided fuzzing.\n # (if you want only bounded fuzzing, just use `pytest fuzz.py`)\n try:\n import sys\n import atheris\n except ImportError:\n pass\n else:\n test = test_idempotent_any_syntatically_valid_python\n atheris.Setup(sys.argv, test.hypothesis.fuzz_one_input)\n atheris.Fuzz()\n", "path": "fuzz.py"}]}
| 1,824 | 161 |
gh_patches_debug_52
|
rasdani/github-patches
|
git_diff
|
Anselmoo__spectrafit-655
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs]: Using builtin release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "0.16.4"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "0.16.4"
+__version__ = "1.0.0a0"
|
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"0.16.4\"\n+__version__ = \"1.0.0a0\"\n", "issue": "[Docs]: Using builtin release drafter\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nhttps://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"0.16.4\"\n", "path": "spectrafit/__init__.py"}]}
| 652 | 95 |
gh_patches_debug_45047
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-750
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
collapse contributors with no answers in course detail pages
Contributors who didn't get any answers should be collapsed on the results pages, so that the empty answer lines are not shown.
This should also happen if there are answers in the database, but none of them can be seen by the current user.

</issue>
<code>
[start of evap/results/views.py]
1 from django.core.exceptions import PermissionDenied
2 from django.shortcuts import get_object_or_404, render
3 from django.contrib.auth.decorators import login_required
4
5 from evap.evaluation.models import Semester, Degree, Contribution
6 from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult
7
8
9 from collections import OrderedDict, namedtuple
10
11
12 @login_required
13 def index(request):
14 semesters = Semester.get_all_with_published_courses()
15
16 return render(request, "results_index.html", dict(semesters=semesters))
17
18
19 @login_required
20 def semester_detail(request, semester_id):
21 semester = get_object_or_404(Semester, id=semester_id)
22 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
23
24 # annotate each course object with its grades
25 for course in courses:
26 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
27
28 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
29
30 courses_by_degree = OrderedDict()
31 for degree in Degree.objects.all():
32 courses_by_degree[degree] = CourseTuple([], [])
33 for course in courses:
34 if course.is_single_result():
35 for degree in course.degrees.all():
36 section = calculate_results(course)[0]
37 result = section.results[0]
38 courses_by_degree[degree].single_results.append((course, result))
39 else:
40 for degree in course.degrees.all():
41 courses_by_degree[degree].courses.append(course)
42
43 template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff)
44 return render(request, "results_semester_detail.html", template_data)
45
46
47 @login_required
48 def course_detail(request, semester_id, course_id):
49 semester = get_object_or_404(Semester, id=semester_id)
50 course = get_object_or_404(semester.course_set, id=course_id)
51
52 if not course.can_user_see_results(request.user):
53 raise PermissionDenied
54
55 sections = calculate_results(course)
56
57 public_view = request.GET.get('public_view', 'false') # default: show own view
58 public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean
59
60 represented_users = list(request.user.represented_users.all())
61 represented_users.append(request.user)
62
63 for section in sections:
64 results = []
65 for result in section.results:
66 if isinstance(result, TextResult):
67 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
68 if answers:
69 results.append(TextResult(question=result.question, answers=answers))
70 else:
71 results.append(result)
72 section.results[:] = results
73
74 # filter empty sections and group by contributor
75 course_sections = []
76 contributor_sections = OrderedDict()
77 for section in sections:
78 if not section.results:
79 continue
80 if section.contributor is None:
81 course_sections.append(section)
82 else:
83 contributor_sections.setdefault(section.contributor, []).append(section)
84
85 # show a warning if course is still in evaluation (for staff preview)
86 evaluation_warning = course.state != 'published'
87
88 # results for a course might not be visible because there are not enough answers
89 # but it can still be "published" e.g. to show the comment results to contributors.
90 # users who can open the results page see a warning message in this case
91 sufficient_votes_warning = not course.can_publish_grades
92
93 show_grades = request.user.is_staff or course.can_publish_grades
94
95 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
96
97 template_data = dict(
98 course=course,
99 course_sections=course_sections,
100 contributor_sections=contributor_sections,
101 evaluation_warning=evaluation_warning,
102 sufficient_votes_warning=sufficient_votes_warning,
103 show_grades=show_grades,
104 staff=request.user.is_staff,
105 contributor=course.is_user_contributor_or_delegate(request.user),
106 can_download_grades=request.user.can_download_grades,
107 public_view=public_view)
108 return render(request, "results_course_detail.html", template_data)
109
110 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
111 if public_view:
112 return False
113 if user.is_staff:
114 return True
115 contributor = text_answer.contribution.contributor
116 if text_answer.is_private:
117 return contributor == user
118 if text_answer.is_published:
119 if contributor in represented_users:
120 return True
121 if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
122 return True
123 if text_answer.contribution.is_general and \
124 text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
125 return True
126
127 return False
128
[end of evap/results/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -3,8 +3,7 @@
from django.contrib.auth.decorators import login_required
from evap.evaluation.models import Semester, Degree, Contribution
-from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult
-
+from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult
from collections import OrderedDict, namedtuple
@@ -21,7 +20,7 @@
semester = get_object_or_404(Semester, id=semester_id)
courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
- # annotate each course object with its grades
+ # Annotate each course object with its grades.
for course in courses:
course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
@@ -54,8 +53,8 @@
sections = calculate_results(course)
- public_view = request.GET.get('public_view', 'false') # default: show own view
- public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean
+ public_view = request.GET.get('public_view', 'false') # Default: show own view.
+ public_view = {'true': True, 'false': False}.get(public_view.lower()) # Convert parameter to boolean.
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
@@ -71,7 +70,7 @@
results.append(result)
section.results[:] = results
- # filter empty sections and group by contributor
+ # Filter empty sections and group by contributor.
course_sections = []
contributor_sections = OrderedDict()
for section in sections:
@@ -80,14 +79,21 @@
if section.contributor is None:
course_sections.append(section)
else:
- contributor_sections.setdefault(section.contributor, []).append(section)
+ contributor_sections.setdefault(section.contributor,
+ {'total_votes': 0, 'sections': []})['sections'].append(section)
+
+ # Sum up all Sections for this contributor.
+ # If section is not a RatingResult:
+ # Add 1 as we assume it is a TextResult or something similar that should be displayed.
+ contributor_sections[section.contributor]['total_votes'] +=\
+ sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
- # show a warning if course is still in evaluation (for staff preview)
+ # Show a warning if course is still in evaluation (for staff preview).
evaluation_warning = course.state != 'published'
- # results for a course might not be visible because there are not enough answers
+ # Results for a course might not be visible because there are not enough answers
# but it can still be "published" e.g. to show the comment results to contributors.
- # users who can open the results page see a warning message in this case
+ # Users who can open the results page see a warning message in this case.
sufficient_votes_warning = not course.can_publish_grades
show_grades = request.user.is_staff or course.can_publish_grades
@@ -107,6 +113,7 @@
public_view=public_view)
return render(request, "results_course_detail.html", template_data)
+
def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
if public_view:
return False
@@ -118,10 +125,11 @@
if text_answer.is_published:
if contributor in represented_users:
return True
- if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
+ if text_answer.contribution.course.contributions.filter(
+ contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
return True
- if text_answer.contribution.is_general and \
- text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
+ if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
+ contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
return True
return False
|
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -3,8 +3,7 @@\n from django.contrib.auth.decorators import login_required\n \n from evap.evaluation.models import Semester, Degree, Contribution\n-from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult\n-\n+from evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult\n \n from collections import OrderedDict, namedtuple\n \n@@ -21,7 +20,7 @@\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n \n- # annotate each course object with its grades\n+ # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n \n@@ -54,8 +53,8 @@\n \n sections = calculate_results(course)\n \n- public_view = request.GET.get('public_view', 'false') # default: show own view\n- public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean\n+ public_view = request.GET.get('public_view', 'false') # Default: show own view.\n+ public_view = {'true': True, 'false': False}.get(public_view.lower()) # Convert parameter to boolean.\n \n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n@@ -71,7 +70,7 @@\n results.append(result)\n section.results[:] = results\n \n- # filter empty sections and group by contributor\n+ # Filter empty sections and group by contributor.\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n@@ -80,14 +79,21 @@\n if section.contributor is None:\n course_sections.append(section)\n else:\n- contributor_sections.setdefault(section.contributor, []).append(section)\n+ contributor_sections.setdefault(section.contributor,\n+ {'total_votes': 0, 'sections': []})['sections'].append(section)\n+\n+ # Sum up all Sections for this contributor.\n+ # If section is not a RatingResult:\n+ # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n+ contributor_sections[section.contributor]['total_votes'] +=\\\n+ sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n \n- # show a warning if course is still in evaluation (for staff preview)\n+ # Show a warning if course is still in evaluation (for staff preview).\n evaluation_warning = course.state != 'published'\n \n- # results for a course might not be visible because there are not enough answers\n+ # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. 
to show the comment results to contributors.\n- # users who can open the results page see a warning message in this case\n+ # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n \n show_grades = request.user.is_staff or course.can_publish_grades\n@@ -107,6 +113,7 @@\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n \n+\n def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n@@ -118,10 +125,11 @@\n if text_answer.is_published:\n if contributor in represented_users:\n return True\n- if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n+ if text_answer.contribution.course.contributions.filter(\n+ contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n- if text_answer.contribution.is_general and \\\n- text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n+ if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n+ contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n \n return False\n", "issue": "collapse contributors with no answers in course detail pages\nContributors who didn't get any answers should be collapsed on the results pages, so that the empty answer lines are not shown.\nThis should also happen if there are answers in the database, but none of them can be seen by the current user.\n\n\n\n", "before_files": [{"content": "from django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.evaluation.tools import calculate_results, calculate_average_grades_and_deviation, TextResult\n\n\nfrom collections import OrderedDict, namedtuple\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n # annotate each course object with its grades\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result():\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree, staff=request.user.is_staff)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = 
get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n public_view = request.GET.get('public_view', 'false') # default: show own view\n public_view = {'true': True, 'false': False}.get(public_view.lower()) # convert parameter to boolean\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty sections and group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor, []).append(section)\n\n # show a warning if course is still in evaluation (for staff preview)\n evaluation_warning = course.state != 'published'\n\n # results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # users who can open the results page see a warning message in this case\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_staff or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n staff=request.user.is_staff,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if user.is_staff:\n return True\n contributor = text_answer.contribution.contributor\n if text_answer.is_private:\n return contributor == user\n if text_answer.is_published:\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and \\\n text_answer.contribution.course.contributions.filter(contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
| 2,023 | 1,003 |
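A minimal, self-contained sketch of the per-contributor vote counting that the evap diff in the row above introduces; `Section` and `RatingResult` here are simplified stand-ins (assumptions for illustration), not the real `evap.evaluation.tools` classes. A contributor whose count stays at zero is exactly the one the issue wants collapsed.

```python
from collections import OrderedDict, namedtuple

# Simplified stand-ins for the evap result types (assumed shapes, not the real classes).
Section = namedtuple("Section", ["contributor", "results"])
RatingResult = namedtuple("RatingResult", ["total_count"])


def group_sections_by_contributor(sections):
    """Group sections per contributor and count the answers that are actually visible."""
    contributor_sections = OrderedDict()
    for section in sections:
        entry = contributor_sections.setdefault(
            section.contributor, {"total_votes": 0, "sections": []}
        )
        entry["sections"].append(section)
        # Rating questions contribute their vote count; other results (e.g. text answers)
        # count as 1 so a contributor with only comments is still shown.
        entry["total_votes"] += sum(
            r.total_count if isinstance(r, RatingResult) else 1 for r in section.results
        )
    return contributor_sections


# A contributor whose total_votes stays 0 can then be collapsed in the template.
grouped = group_sections_by_contributor([
    Section("alice", [RatingResult(total_count=5)]),
    Section("bob", []),
])
assert grouped["alice"]["total_votes"] == 5
assert grouped["bob"]["total_votes"] == 0
```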
gh_patches_debug_631
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-2042
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.121
On the docket:
+ [x] Building Pex with requirements.txt that includes local directory + Python version specifier fails #2037
+ [x] Failed to resolve compatible distributions when building Pex from .whl with local dependencies #2038
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.120"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.120"
+__version__ = "2.1.121"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.120\"\n+__version__ = \"2.1.121\"\n", "issue": "Release 2.1.121\nOn the docket:\r\n+ [x] Building Pex with requirements.txt that includes local directory + Python version specifier fails #2037 \r\n+ [x] Failed to resolve compatible distributions when building Pex from .whl with local dependencies #2038 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.120\"\n", "path": "pex/version.py"}]}
| 651 | 98 |
gh_patches_debug_1715
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-5054
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `__future__.annotations` everywhere in the Optuna code base
### Motivation
Optuna drops Python 3.6 from v3.1, so we can use `__future__.annotations`, which simplifies the code base. See [PEP 563](https://peps.python.org/pep-0563/), [PEP 584](https://peps.python.org/pep-0584/), [PEP 585](https://peps.python.org/pep-0585/), and [PEP 604](https://peps.python.org/pep-0604/) for more details. This issue suggests using the module to simplify the code base.
### Suggestion
Use `__future__.annotations` for each file and simplify the type annotations. The list of classes whose type annotations can be simplified is [here](https://peps.python.org/pep-0585/#implementation). The list of files where the `__future__.annotations` can be used is as follows. In order to reduce review costs and to encourage more contributors to work on it, please, as a rule, fix one file per PR.
- [x] optuna/_convert_positional_args.py
- [x] optuna/visualization/_optimization_history.py
- [x] optuna/visualization/_hypervolume_history.py
- [x] optuna/visualization/_edf.py
- [x] optuna/visualization/_pareto_front.py
- [x] optuna/visualization/matplotlib/_optimization_history.py
- [x] optuna/visualization/matplotlib/_hypervolume_history.py
- [x] optuna/visualization/matplotlib/_edf.py
- [x] optuna/visualization/matplotlib/_pareto_front.py
- [x] optuna/visualization/matplotlib/_contour.py
- [x] optuna/visualization/_utils.py
- [x] optuna/logging.py
- [ ] optuna/storages/_base.py
- [ ] optuna/storages/_cached_storage.py
- [ ] optuna/storages/__init__.py
- [ ] optuna/storages/_heartbeat.py
- [ ] optuna/storages/_in_memory.py
- [ ] optuna/storages/_rdb/models.py
- [ ] optuna/storages/_rdb/storage.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.c.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.d.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.a.py
- [ ] optuna/storages/_journal/file.py
- [ ] optuna/storages/_journal/redis.py
- [ ] optuna/storages/_journal/storage.py
- [ ] optuna/storages/_journal/base.py
- [ ] optuna/study/_dataframe.py
- [ ] optuna/study/_optimize.py
- [ ] optuna/study/_tell.py
- [ ] optuna/study/_multi_objective.py
- [ ] optuna/study/_frozen.py
- [ ] optuna/study/study.py
- [ ] optuna/study/_study_summary.py
- [ ] optuna/search_space/group_decomposed.py
- [ ] optuna/search_space/intersection.py
- [ ] optuna/_typing.py
- [ ] optuna/_deprecated.py
- [ ] optuna/pruners/_hyperband.py
- [ ] optuna/pruners/_patient.py
- [ ] optuna/pruners/_successive_halving.py
- [ ] optuna/pruners/_percentile.py
- [ ] optuna/pruners/_threshold.py
- [ ] optuna/trial/_base.py
- [ ] optuna/trial/_fixed.py
- [ ] optuna/trial/_trial.py
- [ ] optuna/trial/_frozen.py
- [ ] optuna/integration/cma.py
- [ ] optuna/integration/shap.py
- [ ] optuna/integration/lightgbm.py
- [ ] optuna/integration/pytorch_distributed.py
- [ ] optuna/integration/_lightgbm_tuner/optimize.py
- [ ] optuna/integration/_lightgbm_tuner/alias.py
- [ ] optuna/integration/mlflow.py
- [ ] optuna/integration/wandb.py
- [ ] optuna/integration/catboost.py
- [ ] optuna/integration/skopt.py
- [ ] optuna/integration/botorch.py
- [ ] optuna/integration/dask.py
- [x] optuna/integration/sklearn.py
- [ ] optuna/integration/tensorboard.py
- [ ] optuna/terminator/callback.py
- [ ] optuna/terminator/terminator.py
- [ ] optuna/terminator/improvement/_preprocessing.py
- [ ] optuna/terminator/improvement/gp/botorch.py
- [ ] optuna/terminator/improvement/gp/base.py
- [ ] optuna/terminator/improvement/evaluator.py
- [ ] optuna/importance/_base.py
- [ ] optuna/importance/_mean_decrease_impurity.py
- [ ] optuna/importance/__init__.py
- [ ] optuna/importance/_fanova/_fanova.py
- [ ] optuna/importance/_fanova/_evaluator.py
- [ ] optuna/importance/_fanova/_tree.py
- [ ] optuna/_imports.py
- [ ] optuna/testing/tempfile_pool.py
- [ ] optuna/testing/threading.py
- [ ] optuna/testing/distributions.py
- [ ] optuna/testing/samplers.py
- [ ] optuna/testing/storages.py
- [ ] optuna/distributions.py
- [ ] optuna/cli.py
- [ ] optuna/multi_objective/visualization/_pareto_front.py
- [ ] optuna/multi_objective/trial.py
- [ ] optuna/multi_objective/samplers/_base.py
- [ ] optuna/multi_objective/samplers/_nsga2.py
- [ ] optuna/multi_objective/samplers/_adapter.py
- [ ] optuna/multi_objective/samplers/_random.py
- [ ] optuna/multi_objective/samplers/_motpe.py
- [ ] optuna/multi_objective/study.py
- [ ] optuna/_experimental.py
- [ ] optuna/samplers/_base.py
- [ ] optuna/samplers/nsgaii/_crossovers/_undx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_spx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_sbx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_vsbx.py
- [ ] optuna/samplers/nsgaii/_sampler.py
- [ ] optuna/samplers/nsgaii/_crossover.py
- [ ] optuna/samplers/_search_space/intersection.py
- [ ] optuna/samplers/_qmc.py
- [ ] optuna/samplers/_tpe/probability_distributions.py
- [ ] optuna/samplers/_tpe/_truncnorm.py
- [ ] optuna/samplers/_tpe/multi_objective_sampler.py
- [ ] optuna/samplers/_tpe/parzen_estimator.py
- [ ] optuna/samplers/_tpe/sampler.py
- [ ] optuna/samplers/_random.py
- [ ] optuna/samplers/_cmaes.py
- [ ] optuna/samplers/_partial_fixed.py
- [ ] optuna/samplers/_brute_force.py
- [ ] optuna/samplers/_nsgaiii.py
- [ ] optuna/samplers/_grid.py
- [ ] optuna/_hypervolume/wfg.py
- [ ] optuna/_hypervolume/hssp.py
- [ ] optuna/progress_bar.py
- [ ] optuna/_transform.py
- [ ] optuna/_callbacks.py
- [ ] tests/multi_objective_tests/test_study.py
- [ ] tests/multi_objective_tests/samplers_tests/test_motpe.py
- [ ] tests/multi_objective_tests/samplers_tests/test_nsga2.py
- [ ] tests/multi_objective_tests/test_trial.py
- [ ] tests/multi_objective_tests/visualization_tests/test_pareto_front.py
- [ ] tests/trial_tests/test_frozen.py
- [ ] tests/trial_tests/test_trials.py
- [ ] tests/trial_tests/test_trial.py
- [ ] tests/pruners_tests/test_percentile.py
- [ ] tests/pruners_tests/test_median.py
- [ ] tests/pruners_tests/test_patient.py
- [ ] tests/pruners_tests/test_successive_halving.py
- [ ] tests/study_tests/test_optimize.py
- [ ] tests/study_tests/test_study.py
- [ ] tests/hypervolume_tests/test_hssp.py
- [x] tests/integration_tests/test_skopt.py
- [x] tests/integration_tests/test_pytorch_lightning.py
- [ ] tests/integration_tests/test_shap.py
- [ ] tests/integration_tests/test_cma.py
- [ ] tests/integration_tests/test_pytorch_distributed.py
- [ ] tests/integration_tests/lightgbm_tuner_tests/test_optimize.py
- [ ] tests/integration_tests/lightgbm_tuner_tests/test_alias.py
- [ ] tests/integration_tests/test_botorch.py
- [ ] tests/integration_tests/test_mlflow.py
- [ ] tests/integration_tests/test_mxnet.py
- [ ] tests/integration_tests/test_wandb.py
- [ ] tests/importance_tests/fanova_tests/test_tree.py
- [ ] tests/importance_tests/test_mean_decrease_impurity.py
- [ ] tests/importance_tests/test_fanova.py
- [ ] tests/importance_tests/test_init.py
- [ ] tests/test_convert_positional_args.py
- [ ] tests/test_deprecated.py
- [ ] tests/storages_tests/test_journal.py
- [ ] tests/storages_tests/test_heartbeat.py
- [ ] tests/storages_tests/test_storages.py
- [ ] tests/storages_tests/rdb_tests/test_storage.py
- [ ] tests/storages_tests/rdb_tests/create_db.py
- [ ] tests/storages_tests/test_with_server.py
- [ ] tests/samplers_tests/test_grid.py
- [ ] tests/samplers_tests/tpe_tests/test_parzen_estimator.py
- [ ] tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py
- [ ] tests/samplers_tests/tpe_tests/test_sampler.py
- [ ] tests/samplers_tests/test_cmaes.py
- [ ] tests/samplers_tests/test_samplers.py
- [x] tests/samplers_tests/test_nsgaii.py
- [x] tests/samplers_tests/test_nsgaiii.py
- [ ] tests/samplers_tests/test_qmc.py
- [ ] tests/test_distributions.py
- [ ] tests/test_multi_objective.py
- [ ] tests/test_cli.py
- [ ] tests/visualization_tests/test_hypervolume_history.py
- [ ] tests/visualization_tests/test_pareto_front.py
- [ ] tests/terminator_tests/improvement_tests/test_evaluator.py
- [ ] benchmarks/kurobako/problems/wfg/transformation_functions.py
- [ ] benchmarks/bayesmark/report_bayesmark.py
- [ ] benchmarks/bayesmark/optuna_optimizer.py
### Additional context (optional)
The above list is generated by the following script.
<details>
<summary>script</summary>
```python
import os
import pathlib
PATTERS = [
"from typing import Union",
"from typing import Optional",
"from typing import Tuple",
"from typing import List",
"from typing import Dict",
"from typing import Set",
"from typing import FrozenSet",
"from typing import Type",
"from typing import FrozenSet",
"from typing import Sequence",
]
def get_filenames_to_be_simplified(dir_path):
ret = []
for f in os.listdir(dir_path):
file_path = os.path.join(dir_path, f)
if not os.path.isfile(file_path):
ret.extend(get_filenames_to_be_simplified(file_path))
else:
try:
with open(file_path) as fd:
contents = fd.read()
if any([s in contents for s in PATTERS]):
ret.append(str(file_path))
except UnicodeDecodeError as e:
pass
return ret
def main():
dirs = ["optuna", "tests", "benchmarks"]
for dir_name in dirs:
filenames = get_filenames_to_be_simplified(pathlib.Path(dir_name))
for filename in filenames:
print(f"- [ ] {filename}")
if __name__ == "__main__":
main()
```
</details>
</issue>
<code>
[start of optuna/_typing.py]
1 from typing import Mapping
2 from typing import Sequence
3 from typing import Union
4
5
6 JSONSerializable = Union[
7 Mapping[str, "JSONSerializable"],
8 Sequence["JSONSerializable"],
9 str,
10 int,
11 float,
12 bool,
13 None,
14 ]
15
16
17 __all__ = ["JSONSerializable"]
18
[end of optuna/_typing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/_typing.py b/optuna/_typing.py
--- a/optuna/_typing.py
+++ b/optuna/_typing.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Mapping
from typing import Sequence
from typing import Union
@@ -13,5 +15,4 @@
None,
]
-
__all__ = ["JSONSerializable"]
|
{"golden_diff": "diff --git a/optuna/_typing.py b/optuna/_typing.py\n--- a/optuna/_typing.py\n+++ b/optuna/_typing.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n from typing import Mapping\n from typing import Sequence\n from typing import Union\n@@ -13,5 +15,4 @@\n None,\n ]\n \n-\n __all__ = [\"JSONSerializable\"]\n", "issue": "Use `__future__.annotations` everywhere in the Optuna code base\n### Motivation\r\n\r\nOptuna drops Python 3.6 from v3.1, so we can use `__future__.annotations`, which simplifies the code base. See [PEP 563](https://peps.python.org/pep-0563/), [PEP584](https://peps.python.org/pep-0584/), [PEP 585](https://peps.python.org/pep-0585/), and [PEP 604](https://peps.python.org/pep-0604/) for more details. This issue suggests to use the module and simplifies the code base.\r\n\r\n### Suggestion\r\n\r\nUse `__future__.annotations` for each file and simplify the type annotations. The list of classes whose type annotations can be simplified is [here](https://peps.python.org/pep-0585/#implementation). The list of files where the `__future__.annotations` can be used is as follows. In order to reduce review costs and to encourage more contributors to work on it, please, as a rule, fix one file per PR.\r\n\r\n- [x] optuna/_convert_positional_args.py\r\n- [x] optuna/visualization/_optimization_history.py\r\n- [x] optuna/visualization/_hypervolume_history.py\r\n- [x] optuna/visualization/_edf.py\r\n- [x] optuna/visualization/_pareto_front.py\r\n- [x] optuna/visualization/matplotlib/_optimization_history.py\r\n- [x] optuna/visualization/matplotlib/_hypervolume_history.py\r\n- [x] optuna/visualization/matplotlib/_edf.py\r\n- [x] optuna/visualization/matplotlib/_pareto_front.py\r\n- [x] optuna/visualization/matplotlib/_contour.py\r\n- [x] optuna/visualization/_utils.py\r\n- [x] optuna/logging.py\r\n- [ ] optuna/storages/_base.py\r\n- [ ] optuna/storages/_cached_storage.py\r\n- [ ] optuna/storages/__init__.py\r\n- [ ] optuna/storages/_heartbeat.py\r\n- [ ] optuna/storages/_in_memory.py\r\n- [ ] optuna/storages/_rdb/models.py\r\n- [ ] optuna/storages/_rdb/storage.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.c.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.d.py\r\n- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.a.py\r\n- [ ] optuna/storages/_journal/file.py\r\n- [ ] optuna/storages/_journal/redis.py\r\n- [ ] optuna/storages/_journal/storage.py\r\n- [ ] optuna/storages/_journal/base.py\r\n- [ ] optuna/study/_dataframe.py\r\n- [ ] optuna/study/_optimize.py\r\n- [ ] optuna/study/_tell.py\r\n- [ ] optuna/study/_multi_objective.py\r\n- [ ] optuna/study/_frozen.py\r\n- [ ] optuna/study/study.py\r\n- [ ] optuna/study/_study_summary.py\r\n- [ ] optuna/search_space/group_decomposed.py\r\n- [ ] optuna/search_space/intersection.py\r\n- [ ] optuna/_typing.py\r\n- [ ] optuna/_deprecated.py\r\n- [ ] optuna/pruners/_hyperband.py\r\n- [ ] optuna/pruners/_patient.py\r\n- [ ] optuna/pruners/_successive_halving.py\r\n- [ ] optuna/pruners/_percentile.py\r\n- [ ] optuna/pruners/_threshold.py\r\n- [ ] optuna/trial/_base.py\r\n- [ ] optuna/trial/_fixed.py\r\n- [ ] optuna/trial/_trial.py\r\n- [ ] optuna/trial/_frozen.py\r\n- [ ] optuna/integration/cma.py\r\n- [ ] optuna/integration/shap.py\r\n- [ ] optuna/integration/lightgbm.py\r\n- [ ] optuna/integration/pytorch_distributed.py\r\n- [ ] optuna/integration/_lightgbm_tuner/optimize.py\r\n- [ ] optuna/integration/_lightgbm_tuner/alias.py\r\n- [ ] optuna/integration/mlflow.py\r\n- [ ] optuna/integration/wandb.py\r\n- [ ] 
optuna/integration/catboost.py\r\n- [ ] optuna/integration/skopt.py\r\n- [ ] optuna/integration/botorch.py\r\n- [ ] optuna/integration/dask.py\r\n- [x] optuna/integration/sklearn.py\r\n- [ ] optuna/integration/tensorboard.py\r\n- [ ] optuna/terminator/callback.py\r\n- [ ] optuna/terminator/terminator.py\r\n- [ ] optuna/terminator/improvement/_preprocessing.py\r\n- [ ] optuna/terminator/improvement/gp/botorch.py\r\n- [ ] optuna/terminator/improvement/gp/base.py\r\n- [ ] optuna/terminator/improvement/evaluator.py\r\n- [ ] optuna/importance/_base.py\r\n- [ ] optuna/importance/_mean_decrease_impurity.py\r\n- [ ] optuna/importance/__init__.py\r\n- [ ] optuna/importance/_fanova/_fanova.py\r\n- [ ] optuna/importance/_fanova/_evaluator.py\r\n- [ ] optuna/importance/_fanova/_tree.py\r\n- [ ] optuna/_imports.py\r\n- [ ] optuna/testing/tempfile_pool.py\r\n- [ ] optuna/testing/threading.py\r\n- [ ] optuna/testing/distributions.py\r\n- [ ] optuna/testing/samplers.py\r\n- [ ] optuna/testing/storages.py\r\n- [ ] optuna/distributions.py\r\n- [ ] optuna/cli.py\r\n- [ ] optuna/multi_objective/visualization/_pareto_front.py\r\n- [ ] optuna/multi_objective/trial.py\r\n- [ ] optuna/multi_objective/samplers/_base.py\r\n- [ ] optuna/multi_objective/samplers/_nsga2.py\r\n- [ ] optuna/multi_objective/samplers/_adapter.py\r\n- [ ] optuna/multi_objective/samplers/_random.py\r\n- [ ] optuna/multi_objective/samplers/_motpe.py\r\n- [ ] optuna/multi_objective/study.py\r\n- [ ] optuna/_experimental.py\r\n- [ ] optuna/samplers/_base.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_undx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_spx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_sbx.py\r\n- [ ] optuna/samplers/nsgaii/_crossovers/_vsbx.py\r\n- [ ] optuna/samplers/nsgaii/_sampler.py\r\n- [ ] optuna/samplers/nsgaii/_crossover.py\r\n- [ ] optuna/samplers/_search_space/intersection.py\r\n- [ ] optuna/samplers/_qmc.py\r\n- [ ] optuna/samplers/_tpe/probability_distributions.py\r\n- [ ] optuna/samplers/_tpe/_truncnorm.py\r\n- [ ] optuna/samplers/_tpe/multi_objective_sampler.py\r\n- [ ] optuna/samplers/_tpe/parzen_estimator.py\r\n- [ ] optuna/samplers/_tpe/sampler.py\r\n- [ ] optuna/samplers/_random.py\r\n- [ ] optuna/samplers/_cmaes.py\r\n- [ ] optuna/samplers/_partial_fixed.py\r\n- [ ] optuna/samplers/_brute_force.py\r\n- [ ] optuna/samplers/_nsgaiii.py\r\n- [ ] optuna/samplers/_grid.py\r\n- [ ] optuna/_hypervolume/wfg.py\r\n- [ ] optuna/_hypervolume/hssp.py\r\n- [ ] optuna/progress_bar.py\r\n- [ ] optuna/_transform.py\r\n- [ ] optuna/_callbacks.py\r\n- [ ] tests/multi_objective_tests/test_study.py\r\n- [ ] tests/multi_objective_tests/samplers_tests/test_motpe.py\r\n- [ ] tests/multi_objective_tests/samplers_tests/test_nsga2.py\r\n- [ ] tests/multi_objective_tests/test_trial.py\r\n- [ ] tests/multi_objective_tests/visualization_tests/test_pareto_front.py\r\n- [ ] tests/trial_tests/test_frozen.py\r\n- [ ] tests/trial_tests/test_trials.py\r\n- [ ] tests/trial_tests/test_trial.py\r\n- [ ] tests/pruners_tests/test_percentile.py\r\n- [ ] tests/pruners_tests/test_median.py\r\n- [ ] tests/pruners_tests/test_patient.py\r\n- [ ] tests/pruners_tests/test_successive_halving.py\r\n- [ ] tests/study_tests/test_optimize.py\r\n- [ ] tests/study_tests/test_study.py\r\n- [ ] tests/hypervolume_tests/test_hssp.py\r\n- [x] tests/integration_tests/test_skopt.py\r\n- [x] tests/integration_tests/test_pytorch_lightning.py\r\n- [ ] tests/integration_tests/test_shap.py\r\n- [ ] tests/integration_tests/test_cma.py\r\n- [ ] 
tests/integration_tests/test_pytorch_distributed.py\r\n- [ ] tests/integration_tests/lightgbm_tuner_tests/test_optimize.py\r\n- [ ] tests/integration_tests/lightgbm_tuner_tests/test_alias.py\r\n- [ ] tests/integration_tests/test_botorch.py\r\n- [ ] tests/integration_tests/test_mlflow.py\r\n- [ ] tests/integration_tests/test_mxnet.py\r\n- [ ] tests/integration_tests/test_wandb.py\r\n- [ ] tests/importance_tests/fanova_tests/test_tree.py\r\n- [ ] tests/importance_tests/test_mean_decrease_impurity.py\r\n- [ ] tests/importance_tests/test_fanova.py\r\n- [ ] tests/importance_tests/test_init.py\r\n- [ ] tests/test_convert_positional_args.py\r\n- [ ] tests/test_deprecated.py\r\n- [ ] tests/storages_tests/test_journal.py\r\n- [ ] tests/storages_tests/test_heartbeat.py\r\n- [ ] tests/storages_tests/test_storages.py\r\n- [ ] tests/storages_tests/rdb_tests/test_storage.py\r\n- [ ] tests/storages_tests/rdb_tests/create_db.py\r\n- [ ] tests/storages_tests/test_with_server.py\r\n- [ ] tests/samplers_tests/test_grid.py\r\n- [ ] tests/samplers_tests/tpe_tests/test_parzen_estimator.py\r\n- [ ] tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py\r\n- [ ] tests/samplers_tests/tpe_tests/test_sampler.py\r\n- [ ] tests/samplers_tests/test_cmaes.py\r\n- [ ] tests/samplers_tests/test_samplers.py\r\n- [x] tests/samplers_tests/test_nsgaii.py\r\n- [x] tests/samplers_tests/test_nsgaiii.py\r\n- [ ] tests/samplers_tests/test_qmc.py\r\n- [ ] tests/test_distributions.py\r\n- [ ] tests/test_multi_objective.py\r\n- [ ] tests/test_cli.py\r\n- [ ] tests/visualization_tests/test_hypervolume_history.py\r\n- [ ] tests/visualization_tests/test_pareto_front.py\r\n- [ ] tests/terminator_tests/improvement_tests/test_evaluator.py\r\n- [ ] benchmarks/kurobako/problems/wfg/transformation_functions.py\r\n- [ ] benchmarks/bayesmark/report_bayesmark.py\r\n- [ ] benchmarks/bayesmark/optuna_optimizer.py\r\n\r\n\r\n### Additional context (optional)\r\n\r\nThe above list is generated by the following script.\r\n\r\n<details>\r\n<summary>script</summary>\r\n\r\n```python\r\nimport os\r\nimport pathlib\r\n\r\n\r\nPATTERS = [\r\n \"from typing import Union\",\r\n \"from typing import Optional\",\r\n \"from typing import Tuple\",\r\n \"from typing import List\",\r\n \"from typing import Dict\",\r\n \"from typing import Set\",\r\n \"from typing import FrozenSet\",\r\n \"from typing import Type\",\r\n \"from typing import FrozenSet\",\r\n \"from typing import Sequence\",\r\n]\r\n\r\n\r\ndef get_filenames_to_be_simplified(dir_path):\r\n ret = []\r\n for f in os.listdir(dir_path):\r\n file_path = os.path.join(dir_path, f)\r\n if not os.path.isfile(file_path):\r\n ret.extend(get_filenames_to_be_simplified(file_path))\r\n else:\r\n try:\r\n with open(file_path) as fd:\r\n contents = fd.read()\r\n if any([s in contents for s in PATTERS]):\r\n ret.append(str(file_path))\r\n except UnicodeDecodeError as e:\r\n pass\r\n return ret\r\n\r\n\r\ndef main():\r\n dirs = [\"optuna\", \"tests\", \"benchmarks\"]\r\n\r\n for dir_name in dirs:\r\n filenames = get_filenames_to_be_simplified(pathlib.Path(dir_name))\r\n for filename in filenames:\r\n print(f\"- [ ] {filename}\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\n</details>\n", "before_files": [{"content": "from typing import Mapping\nfrom typing import Sequence\nfrom typing import Union\n\n\nJSONSerializable = Union[\n Mapping[str, \"JSONSerializable\"],\n Sequence[\"JSONSerializable\"],\n str,\n int,\n float,\n bool,\n None,\n]\n\n\n__all__ = [\"JSONSerializable\"]\n", "path": 
"optuna/_typing.py"}]}
| 3,511 | 92 |
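A short, hypothetical sketch of the clean-up the optuna issue above asks for; the function below is illustrative and not taken from Optuna. With `from __future__ import annotations` (PEP 563), annotations are no longer evaluated at import time, so the builtin generics of PEP 585 and the `X | Y` unions of PEP 604 can be written even on Python 3.7/3.8:

```python
from __future__ import annotations


def summarize(values: list[float], labels: dict[int, str] | None = None) -> tuple[float, float]:
    """Return the mean and maximum of ``values``; ``labels`` is only here to show the syntax."""
    mean = sum(values) / len(values)
    return mean, max(values)


print(summarize([1.0, 2.0, 3.0]))  # (2.0, 3.0)
```

Note that this only affects annotations: runtime-evaluated constructs such as the `JSONSerializable` type alias in `optuna/_typing.py` still need the `typing` spellings, which is why the golden diff above keeps the `Mapping`/`Sequence`/`Union` imports and only adds the future import.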
gh_patches_debug_25893
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-551
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use predicates to prevent access to items if no phase has started yet
With topicprio and polls, items are created before any phase has been started.
On the project_detail page a corresponding message and no content is shown.
This is checked via the `PhaseDispatchMixin`.
As it is not possible to use the `PhaseDispatchMixin` for items, a new
predicate has been introduced which tests whether a phase has been started
yet. Be aware that this does not imply an active phase.
If no phase has been started yet, access to the items should be prevented.
But if participation has finished, the items should still be available.
See 362a8ce6a79888b2cc1dbdeb789107a156c5d15f for reference.
</issue>
<code>
[start of apps/polls/rules.py]
1 import rules
2 from rules.predicates import is_superuser
3
4 from adhocracy4.modules import predicates as module_predicates
5
6 from . import models
7
8 rules.add_perm(
9 'meinberlin_polls.change_poll',
10 is_superuser | module_predicates.is_context_initiator
11 )
12
13 rules.add_perm(
14 'meinberlin_polls.view_poll',
15 module_predicates.is_public_context
16 )
17
18 rules.add_perm(
19 'meinberlin_polls.comment_poll',
20 module_predicates.is_allowed_comment_item
21 )
22
23 rules.add_perm(
24 'meinberlin_polls.add_vote',
25 module_predicates.is_allowed_add_item(models.Vote)
26 )
27
28 rules.add_perm(
29 'meinberlin_polls.change_vote',
30 module_predicates.is_allowed_add_item(models.Vote)
31 )
32
[end of apps/polls/rules.py]
[start of apps/topicprio/rules.py]
1 import rules
2
3 from adhocracy4.modules import predicates as module_predicates
4
5 rules.add_perm(
6 'meinberlin_topicprio.add_topic',
7 module_predicates.is_project_admin
8 )
9
10 rules.add_perm(
11 'meinberlin_topicprio.change_topic',
12 module_predicates.is_project_admin
13 )
14
15 rules.add_perm(
16 'meinberlin_topicprio.rate_topic',
17 module_predicates.is_allowed_rate_item
18 )
19
20 rules.add_perm(
21 'meinberlin_topicprio.comment_topic',
22 module_predicates.is_allowed_comment_item
23 )
24
[end of apps/topicprio/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/polls/rules.py b/apps/polls/rules.py
--- a/apps/polls/rules.py
+++ b/apps/polls/rules.py
@@ -2,6 +2,7 @@
from rules.predicates import is_superuser
from adhocracy4.modules import predicates as module_predicates
+from apps.contrib import predicates as contrib_predicates
from . import models
@@ -12,7 +13,9 @@
rules.add_perm(
'meinberlin_polls.view_poll',
- module_predicates.is_public_context
+ (module_predicates.is_project_admin |
+ (module_predicates.is_allowed_view_item &
+ contrib_predicates.has_context_started))
)
rules.add_perm(
diff --git a/apps/topicprio/rules.py b/apps/topicprio/rules.py
--- a/apps/topicprio/rules.py
+++ b/apps/topicprio/rules.py
@@ -1,6 +1,7 @@
import rules
from adhocracy4.modules import predicates as module_predicates
+from apps.contrib import predicates as contrib_predicates
rules.add_perm(
'meinberlin_topicprio.add_topic',
@@ -12,6 +13,13 @@
module_predicates.is_project_admin
)
+rules.add_perm(
+ 'meinberlin_topicprio.view_topic',
+ (module_predicates.is_project_admin |
+ (module_predicates.is_allowed_view_item &
+ contrib_predicates.has_context_started))
+)
+
rules.add_perm(
'meinberlin_topicprio.rate_topic',
module_predicates.is_allowed_rate_item
|
{"golden_diff": "diff --git a/apps/polls/rules.py b/apps/polls/rules.py\n--- a/apps/polls/rules.py\n+++ b/apps/polls/rules.py\n@@ -2,6 +2,7 @@\n from rules.predicates import is_superuser\n \n from adhocracy4.modules import predicates as module_predicates\n+from apps.contrib import predicates as contrib_predicates\n \n from . import models\n \n@@ -12,7 +13,9 @@\n \n rules.add_perm(\n 'meinberlin_polls.view_poll',\n- module_predicates.is_public_context\n+ (module_predicates.is_project_admin |\n+ (module_predicates.is_allowed_view_item &\n+ contrib_predicates.has_context_started))\n )\n \n rules.add_perm(\ndiff --git a/apps/topicprio/rules.py b/apps/topicprio/rules.py\n--- a/apps/topicprio/rules.py\n+++ b/apps/topicprio/rules.py\n@@ -1,6 +1,7 @@\n import rules\n \n from adhocracy4.modules import predicates as module_predicates\n+from apps.contrib import predicates as contrib_predicates\n \n rules.add_perm(\n 'meinberlin_topicprio.add_topic',\n@@ -12,6 +13,13 @@\n module_predicates.is_project_admin\n )\n \n+rules.add_perm(\n+ 'meinberlin_topicprio.view_topic',\n+ (module_predicates.is_project_admin |\n+ (module_predicates.is_allowed_view_item &\n+ contrib_predicates.has_context_started))\n+)\n+\n rules.add_perm(\n 'meinberlin_topicprio.rate_topic',\n module_predicates.is_allowed_rate_item\n", "issue": "Use predicates to prevent access to items if no phase has started yet\nWith topicprio and polls items are created before any phase has been started.\r\nOn the project_detail page a according message and no content is shown.\r\nThis is checked via the `PhaseDispatchMixin`.\r\nAs for items it is not possible to use the `PhaseDispatchMixin` a new\r\npredicate has been introduced which tests if a phase has been started\r\nyet. Be aware, that this does not imply an active phase.\r\nIf no phase has been started yet access to the items should be prevented. \r\nBut if participation did finish the items should still be available.\r\nSee 362a8ce6a79888b2cc1dbdeb789107a156c5d15f for reference. \n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nfrom . import models\n\nrules.add_perm(\n 'meinberlin_polls.change_poll',\n is_superuser | module_predicates.is_context_initiator\n)\n\nrules.add_perm(\n 'meinberlin_polls.view_poll',\n module_predicates.is_public_context\n)\n\nrules.add_perm(\n 'meinberlin_polls.comment_poll',\n module_predicates.is_allowed_comment_item\n)\n\nrules.add_perm(\n 'meinberlin_polls.add_vote',\n module_predicates.is_allowed_add_item(models.Vote)\n)\n\nrules.add_perm(\n 'meinberlin_polls.change_vote',\n module_predicates.is_allowed_add_item(models.Vote)\n)\n", "path": "apps/polls/rules.py"}, {"content": "import rules\n\nfrom adhocracy4.modules import predicates as module_predicates\n\nrules.add_perm(\n 'meinberlin_topicprio.add_topic',\n module_predicates.is_project_admin\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.change_topic',\n module_predicates.is_project_admin\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.rate_topic',\n module_predicates.is_allowed_rate_item\n)\n\nrules.add_perm(\n 'meinberlin_topicprio.comment_topic',\n module_predicates.is_allowed_comment_item\n)\n", "path": "apps/topicprio/rules.py"}]}
| 1,109 | 340 |
gh_patches_debug_41121
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Code snippet cog doesn't handle 404 errors
Sentry Issue: [BOT-Z4](https://sentry.io/organizations/python-discord/issues/2368344750/?referrer=github_integration)
The following error occurs when trying to fetch from a non-existing repository:
```
Failed to fetch code snippet from https://api.github.com/repos/fake/link/branches. HTTP Status: 404. Message: 404, message='Not Found', url=URL('https://api.github.com/repos/fake/link/branches').
```
It should be handled by the cog and passed silently.
</issue>
<code>
[start of bot/exts/info/code_snippets.py]
1 import logging
2 import re
3 import textwrap
4 from urllib.parse import quote_plus
5
6 from aiohttp import ClientResponseError
7 from discord import Message
8 from discord.ext.commands import Cog
9
10 from bot.bot import Bot
11 from bot.constants import Channels
12 from bot.utils.messages import wait_for_deletion
13
14 log = logging.getLogger(__name__)
15
16 GITHUB_RE = re.compile(
17 r'https://github\.com/(?P<repo>[a-zA-Z0-9-]+/[\w.-]+)/blob/'
18 r'(?P<path>[^#>]+)(\?[^#>]+)?(#L(?P<start_line>\d+)([-~:]L(?P<end_line>\d+))?)'
19 )
20
21 GITHUB_GIST_RE = re.compile(
22 r'https://gist\.github\.com/([a-zA-Z0-9-]+)/(?P<gist_id>[a-zA-Z0-9]+)/*'
23 r'(?P<revision>[a-zA-Z0-9]*)/*#file-(?P<file_path>[^#>]+?)(\?[^#>]+)?'
24 r'(-L(?P<start_line>\d+)([-~:]L(?P<end_line>\d+))?)'
25 )
26
27 GITHUB_HEADERS = {'Accept': 'application/vnd.github.v3.raw'}
28
29 GITLAB_RE = re.compile(
30 r'https://gitlab\.com/(?P<repo>[\w.-]+/[\w.-]+)/\-/blob/(?P<path>[^#>]+)'
31 r'(\?[^#>]+)?(#L(?P<start_line>\d+)(-(?P<end_line>\d+))?)'
32 )
33
34 BITBUCKET_RE = re.compile(
35 r'https://bitbucket\.org/(?P<repo>[a-zA-Z0-9-]+/[\w.-]+)/src/(?P<ref>[0-9a-zA-Z]+)'
36 r'/(?P<file_path>[^#>]+)(\?[^#>]+)?(#lines-(?P<start_line>\d+)(:(?P<end_line>\d+))?)'
37 )
38
39
40 class CodeSnippets(Cog):
41 """
42 Cog that parses and sends code snippets to Discord.
43
44 Matches each message against a regex and prints the contents of all matched snippets.
45 """
46
47 async def _fetch_response(self, url: str, response_format: str, **kwargs) -> str:
48 """Makes http requests using aiohttp."""
49 try:
50 async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:
51 if response_format == 'text':
52 return await response.text()
53 elif response_format == 'json':
54 return await response.json()
55 except ClientResponseError as error:
56 log.error(f'Failed to fetch code snippet from {url}. HTTP Status: {error.status}. Message: {str(error)}.')
57
58 def _find_ref(self, path: str, refs: tuple) -> tuple:
59 """Loops through all branches and tags to find the required ref."""
60 # Base case: there is no slash in the branch name
61 ref, file_path = path.split('/', 1)
62 # In case there are slashes in the branch name, we loop through all branches and tags
63 for possible_ref in refs:
64 if path.startswith(possible_ref['name'] + '/'):
65 ref = possible_ref['name']
66 file_path = path[len(ref) + 1:]
67 break
68 return (ref, file_path)
69
70 async def _fetch_github_snippet(
71 self,
72 repo: str,
73 path: str,
74 start_line: str,
75 end_line: str
76 ) -> str:
77 """Fetches a snippet from a GitHub repo."""
78 # Search the GitHub API for the specified branch
79 branches = await self._fetch_response(
80 f'https://api.github.com/repos/{repo}/branches',
81 'json',
82 headers=GITHUB_HEADERS
83 )
84 tags = await self._fetch_response(f'https://api.github.com/repos/{repo}/tags', 'json', headers=GITHUB_HEADERS)
85 refs = branches + tags
86 ref, file_path = self._find_ref(path, refs)
87
88 file_contents = await self._fetch_response(
89 f'https://api.github.com/repos/{repo}/contents/{file_path}?ref={ref}',
90 'text',
91 headers=GITHUB_HEADERS,
92 )
93 return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)
94
95 async def _fetch_github_gist_snippet(
96 self,
97 gist_id: str,
98 revision: str,
99 file_path: str,
100 start_line: str,
101 end_line: str
102 ) -> str:
103 """Fetches a snippet from a GitHub gist."""
104 gist_json = await self._fetch_response(
105 f'https://api.github.com/gists/{gist_id}{f"/{revision}" if len(revision) > 0 else ""}',
106 'json',
107 headers=GITHUB_HEADERS,
108 )
109
110 # Check each file in the gist for the specified file
111 for gist_file in gist_json['files']:
112 if file_path == gist_file.lower().replace('.', '-'):
113 file_contents = await self._fetch_response(
114 gist_json['files'][gist_file]['raw_url'],
115 'text',
116 )
117 return self._snippet_to_codeblock(file_contents, gist_file, start_line, end_line)
118 return ''
119
120 async def _fetch_gitlab_snippet(
121 self,
122 repo: str,
123 path: str,
124 start_line: str,
125 end_line: str
126 ) -> str:
127 """Fetches a snippet from a GitLab repo."""
128 enc_repo = quote_plus(repo)
129
130 # Searches the GitLab API for the specified branch
131 branches = await self._fetch_response(
132 f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/branches',
133 'json'
134 )
135 tags = await self._fetch_response(f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/tags', 'json')
136 refs = branches + tags
137 ref, file_path = self._find_ref(path, refs)
138 enc_ref = quote_plus(ref)
139 enc_file_path = quote_plus(file_path)
140
141 file_contents = await self._fetch_response(
142 f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/files/{enc_file_path}/raw?ref={enc_ref}',
143 'text',
144 )
145 return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)
146
147 async def _fetch_bitbucket_snippet(
148 self,
149 repo: str,
150 ref: str,
151 file_path: str,
152 start_line: int,
153 end_line: int
154 ) -> str:
155 """Fetches a snippet from a BitBucket repo."""
156 file_contents = await self._fetch_response(
157 f'https://bitbucket.org/{quote_plus(repo)}/raw/{quote_plus(ref)}/{quote_plus(file_path)}',
158 'text',
159 )
160 return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)
161
162 def _snippet_to_codeblock(self, file_contents: str, file_path: str, start_line: str, end_line: str) -> str:
163 """
164 Given the entire file contents and target lines, creates a code block.
165
166 First, we split the file contents into a list of lines and then keep and join only the required
167 ones together.
168
169 We then dedent the lines to look nice, and replace all ` characters with `\u200b to prevent
170 markdown injection.
171
172 Finally, we surround the code with ``` characters.
173 """
174 # Parse start_line and end_line into integers
175 if end_line is None:
176 start_line = end_line = int(start_line)
177 else:
178 start_line = int(start_line)
179 end_line = int(end_line)
180
181 split_file_contents = file_contents.splitlines()
182
183 # Make sure that the specified lines are in range
184 if start_line > end_line:
185 start_line, end_line = end_line, start_line
186 if start_line > len(split_file_contents) or end_line < 1:
187 return ''
188 start_line = max(1, start_line)
189 end_line = min(len(split_file_contents), end_line)
190
191 # Gets the code lines, dedents them, and inserts zero-width spaces to prevent Markdown injection
192 required = '\n'.join(split_file_contents[start_line - 1:end_line])
193 required = textwrap.dedent(required).rstrip().replace('`', '`\u200b')
194
195 # Extracts the code language and checks whether it's a "valid" language
196 language = file_path.split('/')[-1].split('.')[-1]
197 trimmed_language = language.replace('-', '').replace('+', '').replace('_', '')
198 is_valid_language = trimmed_language.isalnum()
199 if not is_valid_language:
200 language = ''
201
202 # Adds a label showing the file path to the snippet
203 if start_line == end_line:
204 ret = f'`{file_path}` line {start_line}\n'
205 else:
206 ret = f'`{file_path}` lines {start_line} to {end_line}\n'
207
208 if len(required) != 0:
209 return f'{ret}```{language}\n{required}```'
210 # Returns an empty codeblock if the snippet is empty
211 return f'{ret}``` ```'
212
213 def __init__(self, bot: Bot):
214 """Initializes the cog's bot."""
215 self.bot = bot
216
217 self.pattern_handlers = [
218 (GITHUB_RE, self._fetch_github_snippet),
219 (GITHUB_GIST_RE, self._fetch_github_gist_snippet),
220 (GITLAB_RE, self._fetch_gitlab_snippet),
221 (BITBUCKET_RE, self._fetch_bitbucket_snippet)
222 ]
223
224 @Cog.listener()
225 async def on_message(self, message: Message) -> None:
226 """Checks if the message has a snippet link, removes the embed, then sends the snippet contents."""
227 if not message.author.bot:
228 all_snippets = []
229
230 for pattern, handler in self.pattern_handlers:
231 for match in pattern.finditer(message.content):
232 snippet = await handler(**match.groupdict())
233 all_snippets.append((match.start(), snippet))
234
235 # Sorts the list of snippets by their match index and joins them into a single message
236 message_to_send = '\n'.join(map(lambda x: x[1], sorted(all_snippets)))
237
238 if 0 < len(message_to_send) <= 2000 and message_to_send.count('\n') <= 15:
239 await message.edit(suppress=True)
240 if len(message_to_send) > 1000 and message.channel.id != Channels.bot_commands:
241 # Redirects to #bot-commands if the snippet contents are too long
242 await self.bot.wait_until_guild_available()
243 await message.channel.send(('The snippet you tried to send was too long. Please '
244 f'see <#{Channels.bot_commands}> for the full snippet.'))
245 bot_commands_channel = self.bot.get_channel(Channels.bot_commands)
246 await wait_for_deletion(
247 await bot_commands_channel.send(message_to_send),
248 (message.author.id,)
249 )
250 else:
251 await wait_for_deletion(
252 await message.channel.send(message_to_send),
253 (message.author.id,)
254 )
255
256
257 def setup(bot: Bot) -> None:
258 """Load the CodeSnippets cog."""
259 bot.add_cog(CodeSnippets(bot))
260
[end of bot/exts/info/code_snippets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/exts/info/code_snippets.py b/bot/exts/info/code_snippets.py
--- a/bot/exts/info/code_snippets.py
+++ b/bot/exts/info/code_snippets.py
@@ -1,6 +1,7 @@
import logging
import re
import textwrap
+from typing import Any
from urllib.parse import quote_plus
from aiohttp import ClientResponseError
@@ -44,16 +45,13 @@
Matches each message against a regex and prints the contents of all matched snippets.
"""
- async def _fetch_response(self, url: str, response_format: str, **kwargs) -> str:
+ async def _fetch_response(self, url: str, response_format: str, **kwargs) -> Any:
"""Makes http requests using aiohttp."""
- try:
- async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:
- if response_format == 'text':
- return await response.text()
- elif response_format == 'json':
- return await response.json()
- except ClientResponseError as error:
- log.error(f'Failed to fetch code snippet from {url}. HTTP Status: {error.status}. Message: {str(error)}.')
+ async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:
+ if response_format == 'text':
+ return await response.text()
+ elif response_format == 'json':
+ return await response.json()
def _find_ref(self, path: str, refs: tuple) -> tuple:
"""Loops through all branches and tags to find the required ref."""
@@ -65,7 +63,7 @@
ref = possible_ref['name']
file_path = path[len(ref) + 1:]
break
- return (ref, file_path)
+ return ref, file_path
async def _fetch_github_snippet(
self,
@@ -149,8 +147,8 @@
repo: str,
ref: str,
file_path: str,
- start_line: int,
- end_line: int
+ start_line: str,
+ end_line: str
) -> str:
"""Fetches a snippet from a BitBucket repo."""
file_contents = await self._fetch_response(
@@ -229,8 +227,16 @@
for pattern, handler in self.pattern_handlers:
for match in pattern.finditer(message.content):
- snippet = await handler(**match.groupdict())
- all_snippets.append((match.start(), snippet))
+ try:
+ snippet = await handler(**match.groupdict())
+ all_snippets.append((match.start(), snippet))
+ except ClientResponseError as error:
+ error_message = error.message # noqa: B306
+ log.log(
+ logging.DEBUG if error.status == 404 else logging.ERROR,
+ f'Failed to fetch code snippet from {match[0]!r}: {error.status} '
+ f'{error_message} for GET {error.request_info.real_url.human_repr()}'
+ )
# Sorts the list of snippets by their match index and joins them into a single message
message_to_send = '\n'.join(map(lambda x: x[1], sorted(all_snippets)))
|
{"golden_diff": "diff --git a/bot/exts/info/code_snippets.py b/bot/exts/info/code_snippets.py\n--- a/bot/exts/info/code_snippets.py\n+++ b/bot/exts/info/code_snippets.py\n@@ -1,6 +1,7 @@\n import logging\n import re\n import textwrap\n+from typing import Any\n from urllib.parse import quote_plus\n \n from aiohttp import ClientResponseError\n@@ -44,16 +45,13 @@\n Matches each message against a regex and prints the contents of all matched snippets.\n \"\"\"\n \n- async def _fetch_response(self, url: str, response_format: str, **kwargs) -> str:\n+ async def _fetch_response(self, url: str, response_format: str, **kwargs) -> Any:\n \"\"\"Makes http requests using aiohttp.\"\"\"\n- try:\n- async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:\n- if response_format == 'text':\n- return await response.text()\n- elif response_format == 'json':\n- return await response.json()\n- except ClientResponseError as error:\n- log.error(f'Failed to fetch code snippet from {url}. HTTP Status: {error.status}. Message: {str(error)}.')\n+ async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:\n+ if response_format == 'text':\n+ return await response.text()\n+ elif response_format == 'json':\n+ return await response.json()\n \n def _find_ref(self, path: str, refs: tuple) -> tuple:\n \"\"\"Loops through all branches and tags to find the required ref.\"\"\"\n@@ -65,7 +63,7 @@\n ref = possible_ref['name']\n file_path = path[len(ref) + 1:]\n break\n- return (ref, file_path)\n+ return ref, file_path\n \n async def _fetch_github_snippet(\n self,\n@@ -149,8 +147,8 @@\n repo: str,\n ref: str,\n file_path: str,\n- start_line: int,\n- end_line: int\n+ start_line: str,\n+ end_line: str\n ) -> str:\n \"\"\"Fetches a snippet from a BitBucket repo.\"\"\"\n file_contents = await self._fetch_response(\n@@ -229,8 +227,16 @@\n \n for pattern, handler in self.pattern_handlers:\n for match in pattern.finditer(message.content):\n- snippet = await handler(**match.groupdict())\n- all_snippets.append((match.start(), snippet))\n+ try:\n+ snippet = await handler(**match.groupdict())\n+ all_snippets.append((match.start(), snippet))\n+ except ClientResponseError as error:\n+ error_message = error.message # noqa: B306\n+ log.log(\n+ logging.DEBUG if error.status == 404 else logging.ERROR,\n+ f'Failed to fetch code snippet from {match[0]!r}: {error.status} '\n+ f'{error_message} for GET {error.request_info.real_url.human_repr()}'\n+ )\n \n # Sorts the list of snippets by their match index and joins them into a single message\n message_to_send = '\\n'.join(map(lambda x: x[1], sorted(all_snippets)))\n", "issue": "Code snippet cog doesn't handle 404 errors\nSentry Issue: [BOT-Z4](https://sentry.io/organizations/python-discord/issues/2368344750/?referrer=github_integration)\n\nThe following error occurs when trying to fetch from a non-exisiting repository:\n\n```\nFailed to fetch code snippet from https://api.github.com/repos/fake/link/branches. HTTP Status: 404. 
Message: 404, message='Not Found', url=URL('https://api.github.com/repos/fake/link/branches').\n```\n\nIt should be handled by the cog and passed silently.\n", "before_files": [{"content": "import logging\nimport re\nimport textwrap\nfrom urllib.parse import quote_plus\n\nfrom aiohttp import ClientResponseError\nfrom discord import Message\nfrom discord.ext.commands import Cog\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels\nfrom bot.utils.messages import wait_for_deletion\n\nlog = logging.getLogger(__name__)\n\nGITHUB_RE = re.compile(\n r'https://github\\.com/(?P<repo>[a-zA-Z0-9-]+/[\\w.-]+)/blob/'\n r'(?P<path>[^#>]+)(\\?[^#>]+)?(#L(?P<start_line>\\d+)([-~:]L(?P<end_line>\\d+))?)'\n)\n\nGITHUB_GIST_RE = re.compile(\n r'https://gist\\.github\\.com/([a-zA-Z0-9-]+)/(?P<gist_id>[a-zA-Z0-9]+)/*'\n r'(?P<revision>[a-zA-Z0-9]*)/*#file-(?P<file_path>[^#>]+?)(\\?[^#>]+)?'\n r'(-L(?P<start_line>\\d+)([-~:]L(?P<end_line>\\d+))?)'\n)\n\nGITHUB_HEADERS = {'Accept': 'application/vnd.github.v3.raw'}\n\nGITLAB_RE = re.compile(\n r'https://gitlab\\.com/(?P<repo>[\\w.-]+/[\\w.-]+)/\\-/blob/(?P<path>[^#>]+)'\n r'(\\?[^#>]+)?(#L(?P<start_line>\\d+)(-(?P<end_line>\\d+))?)'\n)\n\nBITBUCKET_RE = re.compile(\n r'https://bitbucket\\.org/(?P<repo>[a-zA-Z0-9-]+/[\\w.-]+)/src/(?P<ref>[0-9a-zA-Z]+)'\n r'/(?P<file_path>[^#>]+)(\\?[^#>]+)?(#lines-(?P<start_line>\\d+)(:(?P<end_line>\\d+))?)'\n)\n\n\nclass CodeSnippets(Cog):\n \"\"\"\n Cog that parses and sends code snippets to Discord.\n\n Matches each message against a regex and prints the contents of all matched snippets.\n \"\"\"\n\n async def _fetch_response(self, url: str, response_format: str, **kwargs) -> str:\n \"\"\"Makes http requests using aiohttp.\"\"\"\n try:\n async with self.bot.http_session.get(url, raise_for_status=True, **kwargs) as response:\n if response_format == 'text':\n return await response.text()\n elif response_format == 'json':\n return await response.json()\n except ClientResponseError as error:\n log.error(f'Failed to fetch code snippet from {url}. HTTP Status: {error.status}. 
Message: {str(error)}.')\n\n def _find_ref(self, path: str, refs: tuple) -> tuple:\n \"\"\"Loops through all branches and tags to find the required ref.\"\"\"\n # Base case: there is no slash in the branch name\n ref, file_path = path.split('/', 1)\n # In case there are slashes in the branch name, we loop through all branches and tags\n for possible_ref in refs:\n if path.startswith(possible_ref['name'] + '/'):\n ref = possible_ref['name']\n file_path = path[len(ref) + 1:]\n break\n return (ref, file_path)\n\n async def _fetch_github_snippet(\n self,\n repo: str,\n path: str,\n start_line: str,\n end_line: str\n ) -> str:\n \"\"\"Fetches a snippet from a GitHub repo.\"\"\"\n # Search the GitHub API for the specified branch\n branches = await self._fetch_response(\n f'https://api.github.com/repos/{repo}/branches',\n 'json',\n headers=GITHUB_HEADERS\n )\n tags = await self._fetch_response(f'https://api.github.com/repos/{repo}/tags', 'json', headers=GITHUB_HEADERS)\n refs = branches + tags\n ref, file_path = self._find_ref(path, refs)\n\n file_contents = await self._fetch_response(\n f'https://api.github.com/repos/{repo}/contents/{file_path}?ref={ref}',\n 'text',\n headers=GITHUB_HEADERS,\n )\n return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)\n\n async def _fetch_github_gist_snippet(\n self,\n gist_id: str,\n revision: str,\n file_path: str,\n start_line: str,\n end_line: str\n ) -> str:\n \"\"\"Fetches a snippet from a GitHub gist.\"\"\"\n gist_json = await self._fetch_response(\n f'https://api.github.com/gists/{gist_id}{f\"/{revision}\" if len(revision) > 0 else \"\"}',\n 'json',\n headers=GITHUB_HEADERS,\n )\n\n # Check each file in the gist for the specified file\n for gist_file in gist_json['files']:\n if file_path == gist_file.lower().replace('.', '-'):\n file_contents = await self._fetch_response(\n gist_json['files'][gist_file]['raw_url'],\n 'text',\n )\n return self._snippet_to_codeblock(file_contents, gist_file, start_line, end_line)\n return ''\n\n async def _fetch_gitlab_snippet(\n self,\n repo: str,\n path: str,\n start_line: str,\n end_line: str\n ) -> str:\n \"\"\"Fetches a snippet from a GitLab repo.\"\"\"\n enc_repo = quote_plus(repo)\n\n # Searches the GitLab API for the specified branch\n branches = await self._fetch_response(\n f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/branches',\n 'json'\n )\n tags = await self._fetch_response(f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/tags', 'json')\n refs = branches + tags\n ref, file_path = self._find_ref(path, refs)\n enc_ref = quote_plus(ref)\n enc_file_path = quote_plus(file_path)\n\n file_contents = await self._fetch_response(\n f'https://gitlab.com/api/v4/projects/{enc_repo}/repository/files/{enc_file_path}/raw?ref={enc_ref}',\n 'text',\n )\n return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)\n\n async def _fetch_bitbucket_snippet(\n self,\n repo: str,\n ref: str,\n file_path: str,\n start_line: int,\n end_line: int\n ) -> str:\n \"\"\"Fetches a snippet from a BitBucket repo.\"\"\"\n file_contents = await self._fetch_response(\n f'https://bitbucket.org/{quote_plus(repo)}/raw/{quote_plus(ref)}/{quote_plus(file_path)}',\n 'text',\n )\n return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line)\n\n def _snippet_to_codeblock(self, file_contents: str, file_path: str, start_line: str, end_line: str) -> str:\n \"\"\"\n Given the entire file contents and target lines, creates a code block.\n\n First, we split the file 
contents into a list of lines and then keep and join only the required\n ones together.\n\n We then dedent the lines to look nice, and replace all ` characters with `\\u200b to prevent\n markdown injection.\n\n Finally, we surround the code with ``` characters.\n \"\"\"\n # Parse start_line and end_line into integers\n if end_line is None:\n start_line = end_line = int(start_line)\n else:\n start_line = int(start_line)\n end_line = int(end_line)\n\n split_file_contents = file_contents.splitlines()\n\n # Make sure that the specified lines are in range\n if start_line > end_line:\n start_line, end_line = end_line, start_line\n if start_line > len(split_file_contents) or end_line < 1:\n return ''\n start_line = max(1, start_line)\n end_line = min(len(split_file_contents), end_line)\n\n # Gets the code lines, dedents them, and inserts zero-width spaces to prevent Markdown injection\n required = '\\n'.join(split_file_contents[start_line - 1:end_line])\n required = textwrap.dedent(required).rstrip().replace('`', '`\\u200b')\n\n # Extracts the code language and checks whether it's a \"valid\" language\n language = file_path.split('/')[-1].split('.')[-1]\n trimmed_language = language.replace('-', '').replace('+', '').replace('_', '')\n is_valid_language = trimmed_language.isalnum()\n if not is_valid_language:\n language = ''\n\n # Adds a label showing the file path to the snippet\n if start_line == end_line:\n ret = f'`{file_path}` line {start_line}\\n'\n else:\n ret = f'`{file_path}` lines {start_line} to {end_line}\\n'\n\n if len(required) != 0:\n return f'{ret}```{language}\\n{required}```'\n # Returns an empty codeblock if the snippet is empty\n return f'{ret}``` ```'\n\n def __init__(self, bot: Bot):\n \"\"\"Initializes the cog's bot.\"\"\"\n self.bot = bot\n\n self.pattern_handlers = [\n (GITHUB_RE, self._fetch_github_snippet),\n (GITHUB_GIST_RE, self._fetch_github_gist_snippet),\n (GITLAB_RE, self._fetch_gitlab_snippet),\n (BITBUCKET_RE, self._fetch_bitbucket_snippet)\n ]\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Checks if the message has a snippet link, removes the embed, then sends the snippet contents.\"\"\"\n if not message.author.bot:\n all_snippets = []\n\n for pattern, handler in self.pattern_handlers:\n for match in pattern.finditer(message.content):\n snippet = await handler(**match.groupdict())\n all_snippets.append((match.start(), snippet))\n\n # Sorts the list of snippets by their match index and joins them into a single message\n message_to_send = '\\n'.join(map(lambda x: x[1], sorted(all_snippets)))\n\n if 0 < len(message_to_send) <= 2000 and message_to_send.count('\\n') <= 15:\n await message.edit(suppress=True)\n if len(message_to_send) > 1000 and message.channel.id != Channels.bot_commands:\n # Redirects to #bot-commands if the snippet contents are too long\n await self.bot.wait_until_guild_available()\n await message.channel.send(('The snippet you tried to send was too long. Please '\n f'see <#{Channels.bot_commands}> for the full snippet.'))\n bot_commands_channel = self.bot.get_channel(Channels.bot_commands)\n await wait_for_deletion(\n await bot_commands_channel.send(message_to_send),\n (message.author.id,)\n )\n else:\n await wait_for_deletion(\n await message.channel.send(message_to_send),\n (message.author.id,)\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the CodeSnippets cog.\"\"\"\n bot.add_cog(CodeSnippets(bot))\n", "path": "bot/exts/info/code_snippets.py"}]}
| 3,823 | 734 |
gh_patches_debug_14913
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-1071
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tf.reshape in Embedding layer call should support partially-defined shapes
Need to convert None to -1 in TensorShape for 2D inputs (batched input).
</issue>
<code>
[start of elasticdl/python/elasticdl/layers/embedding.py]
1 import tensorflow as tf
2 from tensorflow.python.keras.utils import tf_utils
3
4
5 class Embedding(tf.keras.layers.Layer):
6 """
7 Input: indexes for the embedding entries with a shape of
8 (batch_size, input_length). Input can be either dense tensor
9 or SparseTensor.
10 Output:
11 corresponding (combined) embeddings with a shape of
12 (batch_size, input_length, output_dim) if combiner is None
13 (batch_size, output_dim) if combiner is not None
14 Arguments:
15 output_dim: the dimension of the embedding vector
16 embedding_initializer: Initializer for embedding table
17 mask_zero: Whether or not the input value 0 is a special "padding"
18 value that should be masked out.
19 If input is SparseTensor, mask_zero must be False.
20 input_length: Length of input sequences, when it is constant.
21 This argument is required if you are going to connect
22 `Flatten` then `Dense` layers upstream
23 (without it, the shape of the dense outputs cannot be computed).
24 combiner: A string specifying the reduction op or None if not used.
25 "mean", "sqrtn" and "sum" are supported for the reduction op.
26 If input is SparseTensor, combiner must set as a reduction op.
27 """
28
29 def __init__(
30 self,
31 output_dim,
32 embedding_initializer="uniform",
33 mask_zero=False,
34 input_length=None,
35 combiner=None,
36 **kwargs
37 ):
38 if "input_shape" not in kwargs and input_length:
39 kwargs["input_shape"] = (input_length,)
40 super(Embedding, self).__init__(**kwargs)
41
42 self.output_dim = output_dim
43 self.embedding_initializer = embedding_initializer
44 self.supports_masking = mask_zero
45 self.input_length = input_length
46 self.combiner = combiner
47 self.tape = None
48 self.worker = None
49 self.bet_ids_pair = []
50
51 @tf_utils.shape_type_conversion
52 def compute_output_shape(self, input_shape):
53 # this function is taken from
54 # tf.keras.layers.Embedding.compute_output_shape
55 # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156
56 if self.input_length is None:
57 return input_shape + (self.output_dim,)
58 else:
59 if isinstance(self.input_length, (list, tuple)):
60 in_lens = list(self.input_length)
61 else:
62 in_lens = [self.input_length]
63 if len(in_lens) != len(input_shape) - 1:
64 raise ValueError(
65 '"input_length" is %s, '
66 "but received input has shape %s"
67 % (str(self.input_length), str(input_shape))
68 )
69 else:
70 for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
71 if s1 is not None and s2 is not None and s1 != s2:
72 raise ValueError(
73 '"input_length" is %s, '
74 "but received input has shape %s"
75 % (str(self.input_length), str(input_shape))
76 )
77 elif s1 is None:
78 in_lens[i] = s2
79 return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)
80
81 @property
82 def name(self):
83 return self._name
84
85 @staticmethod
86 def get_key(name_list):
87 return "-".join(map(str, name_list))
88
89 def lookup_embedding(self, unique_ids):
90 batch_embedding = self.worker.lookup_embedding(
91 unique_ids, self._name, self.embedding_initializer, self.output_dim
92 )
93 return batch_embedding
94
95 def call(self, input):
96 if isinstance(input, tf.SparseTensor):
97 return self._sparse_input_call(input)
98
99 ids = tf.convert_to_tensor(input, name="embedding_ids")
100 flat_ids = tf.reshape(ids, [-1])
101 unique_ids, idx = tf.unique(flat_ids)
102 batch_embedding_tensor = tf.py_function(
103 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32
104 )
105 if self.tape:
106 # tape.watch works with eager mode only.
107 # Gradient for embeddings is SparseTensor here due to tf.gather op.
108 # tf.gather accesses tensor slices, resulting in sparse tensor
109 # gradient.
110 if not tf.executing_eagerly():
111 raise RuntimeError("tape.watch only works with eager mode")
112 self.tape.watch(batch_embedding_tensor)
113 self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))
114 outputs = tf.gather(batch_embedding_tensor, idx)
115 outputs = tf.reshape(
116 outputs, ids.get_shape().concatenate(self.output_dim)
117 )
118 # TODO: support combiner for dense input
119 return outputs
120
121 def _sparse_input_call(self, sparse_input):
122 if self.combiner not in ["sum", "mean", "sqrtn"]:
123 raise ValueError(
124 "combiner must set sum, mean or sqrtn for sparse input"
125 )
126 unique_ids, idx = tf.unique(sparse_input.values)
127 embeddings = tf.py_function(
128 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32
129 )
130 if self.tape:
131 # tape.watch works with eager mode only
132 # gradient for embeddings is dense tensor for sparse_input_call
133 if not tf.executing_eagerly():
134 raise RuntimeError("tape.watch only works with eager mode")
135 self.tape.watch(embeddings)
136 self.bet_ids_pair.append((embeddings, unique_ids))
137 segment_ids = sparse_input.indices[:, 0]
138 if segment_ids.dtype != tf.int32:
139 segment_ids = tf.cast(segment_ids, tf.int32)
140
141 if self.combiner == "sum":
142 embeddings = tf.sparse.segment_sum(embeddings, idx, segment_ids)
143 elif self.combiner == "mean":
144 embeddings = tf.sparse.segment_mean(embeddings, idx, segment_ids)
145 elif self.combiner == "sqrtn":
146 embeddings = tf.sparse.segment_sqrt_n(embeddings, idx, segment_ids)
147 return embeddings
148
149 def compute_mask(self, inputs, mask=None):
150 if isinstance(input, tf.SparseTensor):
151 raise ValueError("SparseTensor inputs do not support mask_zero")
152 if not self.supports_masking:
153 return None
154 return tf.math.not_equal(inputs, 0)
155
156 def reset(self):
157 self.bet_ids_pair = []
158 self.tape = None
159
160 def set_tape(self, tape):
161 self.tape = tape
162
163 def set_worker(self, worker):
164 self.worker = worker
165
[end of elasticdl/python/elasticdl/layers/embedding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py
--- a/elasticdl/python/elasticdl/layers/embedding.py
+++ b/elasticdl/python/elasticdl/layers/embedding.py
@@ -112,9 +112,12 @@
self.tape.watch(batch_embedding_tensor)
self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))
outputs = tf.gather(batch_embedding_tensor, idx)
- outputs = tf.reshape(
- outputs, ids.get_shape().concatenate(self.output_dim)
- )
+ # tf.reshape does not support shape with None. Replace None with -1.
+ if ids.get_shape().rank == 2:
+ output_shape = (-1, ids.get_shape()[1], self.output_dim)
+ else:
+ output_shape = ids.get_shape().concatenate(self.output_dim)
+ outputs = tf.reshape(outputs, output_shape)
# TODO: support combiner for dense input
return outputs
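
As a rough standalone sketch of the underlying problem (the shapes and values here are made up for illustration, and the patch above special-cases rank-2 inputs rather than replacing every unknown dimension): tf.reshape rejects a target shape containing None, so any unknown dimension has to be expressed as -1 before reshaping.

import tensorflow as tf

# Static shape with an unknown batch dimension, as ids.get_shape() reports
# for a batched input, extended by the embedding dimension.
static_shape = tf.TensorShape([None, 5]).concatenate([4])   # (None, 5, 4)

# Replace unknown dimensions with -1 so tf.reshape accepts the shape.
target_shape = [-1 if dim is None else dim for dim in static_shape.as_list()]

gathered = tf.zeros([3 * 5, 4])                # stand-in for gathered embeddings
outputs = tf.reshape(gathered, target_shape)   # works: shape (3, 5, 4)
print(outputs.shape)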
|
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py\n--- a/elasticdl/python/elasticdl/layers/embedding.py\n+++ b/elasticdl/python/elasticdl/layers/embedding.py\n@@ -112,9 +112,12 @@\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n- outputs = tf.reshape(\n- outputs, ids.get_shape().concatenate(self.output_dim)\n- )\n+ # tf.reshape does not support shape with None. Replace None with -1.\n+ if ids.get_shape().rank == 2:\n+ output_shape = (-1, ids.get_shape()[1], self.output_dim)\n+ else:\n+ output_shape = ids.get_shape().concatenate(self.output_dim)\n+ outputs = tf.reshape(outputs, output_shape)\n # TODO: support combiner for dense input\n return outputs\n", "issue": "tf.reshape in Embedding layer call supports for partially-defined shape\nNeed to convert None to -1 in TensorShape for 2D inputs (batched input).\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries with a shape of\n (batch_size, input_length). Input can be either dense tensor\n or SparseTensor.\n Output:\n corresponding (combined) embeddings with a shape of\n (batch_size, input_length, output_dim) if combiner is None\n (batch_size, output_dim) if combiner is not None\n Arguments:\n output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n If input is SparseTensor, mask_zero must be False.\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n combiner: A string specifying the reduction op or None if not used.\n \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n If input is SparseTensor, combiner must set as a reduction op.\n \"\"\"\n\n def __init__(\n self,\n output_dim,\n embedding_initializer=\"uniform\",\n mask_zero=False,\n input_length=None,\n combiner=None,\n **kwargs\n ):\n if \"input_shape\" not in kwargs and input_length:\n kwargs[\"input_shape\"] = (input_length,)\n super(Embedding, self).__init__(**kwargs)\n\n self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n self.supports_masking = mask_zero\n self.input_length = input_length\n self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n # this function is taken from\n # tf.keras.layers.Embedding.compute_output_shape\n # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not 
None and s1 != s2:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n elif s1 is None:\n in_lens[i] = s2\n return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.lookup_embedding(\n unique_ids, self._name, self.embedding_initializer, self.output_dim\n )\n return batch_embedding\n\n def call(self, input):\n if isinstance(input, tf.SparseTensor):\n return self._sparse_input_call(input)\n\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only.\n # Gradient for embeddings is SparseTensor here due to tf.gather op.\n # tf.gather accesses tensor slices, resulting in sparse tensor\n # gradient.\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.output_dim)\n )\n # TODO: support combiner for dense input\n return outputs\n\n def _sparse_input_call(self, sparse_input):\n if self.combiner not in [\"sum\", \"mean\", \"sqrtn\"]:\n raise ValueError(\n \"combiner must set sum, mean or sqrtn for sparse input\"\n )\n unique_ids, idx = tf.unique(sparse_input.values)\n embeddings = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n # gradient for embeddings is dense tensor for sparse_input_call\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(embeddings)\n self.bet_ids_pair.append((embeddings, unique_ids))\n segment_ids = sparse_input.indices[:, 0]\n if segment_ids.dtype != tf.int32:\n segment_ids = tf.cast(segment_ids, tf.int32)\n\n if self.combiner == \"sum\":\n embeddings = tf.sparse.segment_sum(embeddings, idx, segment_ids)\n elif self.combiner == \"mean\":\n embeddings = tf.sparse.segment_mean(embeddings, idx, segment_ids)\n elif self.combiner == \"sqrtn\":\n embeddings = tf.sparse.segment_sqrt_n(embeddings, idx, segment_ids)\n return embeddings\n\n def compute_mask(self, inputs, mask=None):\n if isinstance(input, tf.SparseTensor):\n raise ValueError(\"SparseTensor inputs do not support mask_zero\")\n if not self.supports_masking:\n return None\n return tf.math.not_equal(inputs, 0)\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}]}
| 2,429 | 228 |
gh_patches_debug_19178
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-1517
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unchosen extras for events not counted correctly
See https://online.ntnu.no/dashboard/events/265/attendees/
There are 3-4 people who haven't chosen any extras, but in the summary at the bottom, it says Ikke valgt: 0
</issue>
<code>
[start of apps/events/dashboard/views.py]
1 # -*- coding: utf-8 -*-
2
3 from datetime import datetime, time, timedelta
4
5 from django.contrib import messages
6 from django.contrib.auth.decorators import login_required
7 from django.core.exceptions import PermissionDenied
8 from django.forms.models import modelformset_factory
9 from django.http import HttpResponse, JsonResponse
10 from django.shortcuts import get_object_or_404, redirect, render
11 from django.utils import timezone
12 from django.utils.translation import ugettext as _
13 from guardian.decorators import permission_required
14
15 from apps.dashboard.tools import get_base_context, has_access
16 from apps.events.dashboard.forms import (ChangeAttendanceEventForm, ChangeEventForm,
17 ChangeReservationForm)
18 from apps.events.dashboard.utils import event_ajax_handler
19 from apps.events.models import AttendanceEvent, Attendee, Event, Reservation, Reservee
20 from apps.events.utils import get_group_restricted_events, get_types_allowed
21
22
23 @login_required
24 @permission_required('events.view_event', return_403=True)
25 def index(request):
26 if not has_access(request):
27 raise PermissionDenied
28
29 allowed_events = get_group_restricted_events(request.user, True)
30 events = allowed_events.filter(event_start__gte=timezone.now().date()).order_by('event_start')
31
32 context = get_base_context(request)
33 context['events'] = events
34
35 return render(request, 'events/dashboard/index.html', context)
36
37
38 @login_required
39 @permission_required('events.view_event', return_403=True)
40 def past(request):
41 if not has_access(request):
42 raise PermissionDenied
43
44 allowed_events = get_group_restricted_events(request.user, True)
45 events = allowed_events.filter(event_start__lt=timezone.now().date()).order_by('-event_start')
46
47 context = get_base_context(request)
48 context['events'] = events
49
50 return render(request, 'events/dashboard/index.html', context)
51
52
53 @login_required
54 @permission_required('events.view_event', return_403=True)
55 def create_event(request):
56 if not has_access(request):
57 raise PermissionDenied
58
59 context = get_base_context(request)
60
61 if request.method == 'POST':
62 form = ChangeEventForm(request.POST)
63 if form.is_valid():
64 cleaned = form.cleaned_data
65
66 if cleaned['event_type'] not in get_types_allowed(request.user):
67 messages.error(request, _(
68 "Du har ikke tilgang til å lage arranngement av typen '%s'.") % cleaned['event_type'])
69 context['change_event_form'] = form
70
71 else:
72 # Create object, but do not commit to db. We need to add stuff.
73 event = form.save(commit=False)
74 # Add author
75 event.author = request.user
76 event.save()
77
78 messages.success(request, _("Arrangementet ble opprettet."))
79 return redirect('dashboard_event_details', event_id=event.id)
80
81 else:
82 context['change_event_form'] = form
83
84 if 'change_event_form' not in context.keys():
85 context['change_event_form'] = ChangeEventForm()
86
87 context['event'] = _('Nytt arrangement')
88 context['active_tab'] = 'details'
89
90 return render(request, 'events/dashboard/details.html', context)
91
92
93 def _create_details_context(request, event_id):
94 """
95 Prepare a context to be shared for all detail views.
96 """
97
98 event = get_object_or_404(Event, pk=event_id)
99
100 # Start with adding base context and the event itself
101 context = get_base_context(request)
102 context['event'] = event
103
104 # Add forms
105 context['change_event_form'] = ChangeEventForm(instance=event)
106 if event.is_attendance_event():
107 context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)
108 if event.attendance_event.has_reservation:
109 context['change_reservation_form'] = ChangeReservationForm(instance=event.attendance_event.reserved_seats)
110 seats = event.attendance_event.reserved_seats.seats
111 ReserveeFormSet = modelformset_factory(
112 Reservee, max_num=seats, extra=seats, fields=['name', 'note', 'allergies'])
113 context['change_reservees_formset'] = ReserveeFormSet(
114 queryset=event.attendance_event.reserved_seats.reservees.all())
115
116 return context
117
118
119 @login_required
120 @permission_required('events.view_event', return_403=True)
121 def event_details(request, event_id, active_tab='details'):
122 if not has_access(request):
123 raise PermissionDenied
124
125 context = _create_details_context(request, event_id)
126 context['active_tab'] = active_tab
127
128 return render(request, 'events/dashboard/details.html', context)
129
130
131 @login_required
132 @permission_required('events.view_attendanceevent', return_403=True)
133 def event_change_attendance(request, event_id):
134 context = _create_details_context(request, event_id)
135 context['active_tab'] = 'attendance'
136
137 event = context['event']
138
139 if not event.is_attendance_event():
140 registration_start = datetime.combine(event.event_start - timedelta(days=7), time(12, 0, 0))
141 timezone.make_aware(registration_start, timezone.get_current_timezone())
142 unattend_deadline = registration_start + timedelta(days=5)
143 registration_end = registration_start + timedelta(days=6)
144
145 attendance_event = AttendanceEvent(
146 event=event,
147 max_capacity=0,
148 registration_start=registration_start,
149 unattend_deadline=unattend_deadline,
150 registration_end=registration_end
151 )
152 attendance_event.save()
153 context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)
154
155 else:
156 if request.method == 'POST':
157 form = ChangeAttendanceEventForm(request.POST, instance=event.attendance_event)
158 if form.is_valid():
159 form.save()
160 messages.success(request, _("Påmeldingsdetaljer ble lagret."))
161 context['change_attendance_form'] = form
162
163 return render(request, 'events/dashboard/details.html', context)
164
165
166 @login_required
167 @permission_required('events.view_attendee', return_403=True)
168 def event_change_attendees(request, event_id, active_tab='attendees'):
169 if not has_access(request):
170 raise PermissionDenied
171
172 context = _create_details_context(request, event_id)
173 context['active_tab'] = 'attendees'
174
175 event = context['event']
176
177 if not event.is_attendance_event():
178 messages.error(request, _("Dette er ikke et påmeldingsarrangement."))
179 return redirect('dashboard_event_details_active', event_id=event.id, active_tab='details')
180
181 # AJAX
182 if request.method == 'POST':
183 if request.is_ajax and 'action' in request.POST:
184 if not event.is_attendance_event:
185 return HttpResponse(_('Dette er ikke et påmeldingsarrangement.'), status=400)
186
187 return JsonResponse(event_ajax_handler(event, request))
188
189 # NON AJAX
190 context = get_base_context(request)
191
192 context['event'] = event
193 context['active_tab'] = active_tab
194
195 extras = {}
196 if event.is_attendance_event() and event.attendance_event.extras:
197 for extra in event.attendance_event.extras.all():
198 extras[extra] = {"type": extra, "attending": 0, "waits": 0, "allergics": []}
199
200 count_extras(extras, "attending", event.attendance_event.attendees_qs)
201 count_extras(extras, "waits", event.attendance_event.waitlist_qs)
202
203 context['change_event_form'] = ChangeEventForm(instance=event)
204 if event.is_attendance_event():
205 context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)
206
207 context['extras'] = extras
208 context['change_event_form'] = ChangeEventForm(instance=event)
209
210 return render(request, 'events/dashboard/details.html', context)
211
212
213 def count_extras(arr, inlist, atts):
214 for att in atts:
215 choice = "Ikke valgt" if att.extras is None else att.extras
216 if att.extras not in arr:
217 arr[choice] = {"type": choice, "attending": 0, "waits": 0, "allergics": []}
218 ex = arr[choice]
219 ex[inlist] += 1
220 if att.user.allergies:
221 what_list = "påmeldt" if inlist is "attending" else "venteliste"
222 ex["allergics"].append({"user": att.user, "list": what_list})
223
224
225 @login_required
226 @permission_required('events.view_reservation', return_403=True)
227 def event_change_reservation(request, event_id):
228 if not has_access(request):
229 raise PermissionDenied
230
231 context = _create_details_context(request, event_id)
232 context['active_tab'] = 'reservation'
233
234 event = context['event']
235
236 if not event.is_attendance_event():
237 messages.error(request, _("Dette er ikke et påmeldingsarrangement."))
238 return redirect('dashboard_event_details_active', event_id=event.id, active_tab='details')
239
240 if request.method == 'POST':
241 if not event.attendance_event.has_reservation:
242 reservation = Reservation(
243 attendance_event=event.attendance_event,
244 seats=0
245 )
246 reservation.save()
247 context['change_reservation_form'] = ChangeReservationForm(instance=reservation)
248 else:
249 form = ChangeReservationForm(request.POST, instance=event.attendance_event.reserved_seats)
250 if form.is_valid():
251 messages.success(request, _("Reservasjonen ble lagret."))
252 form.save()
253 context['change_reservation_form'] = form
254
255 return render(request, 'events/dashboard/details.html', context)
256
257
258 @login_required
259 @permission_required('events.view_attendee', return_403=True)
260 def attendee_details(request, attendee_id):
261
262 context = get_base_context(request)
263
264 attendee = get_object_or_404(Attendee, pk=attendee_id)
265
266 context['attendee'] = attendee
267 return render(request, 'events/dashboard/attendee.html', context)
268
[end of apps/events/dashboard/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/events/dashboard/views.py b/apps/events/dashboard/views.py
--- a/apps/events/dashboard/views.py
+++ b/apps/events/dashboard/views.py
@@ -210,16 +210,16 @@
return render(request, 'events/dashboard/details.html', context)
-def count_extras(arr, inlist, atts):
- for att in atts:
- choice = "Ikke valgt" if att.extras is None else att.extras
- if att.extras not in arr:
- arr[choice] = {"type": choice, "attending": 0, "waits": 0, "allergics": []}
- ex = arr[choice]
- ex[inlist] += 1
- if att.user.allergies:
- what_list = "påmeldt" if inlist is "attending" else "venteliste"
- ex["allergics"].append({"user": att.user, "list": what_list})
+def count_extras(event_extras, attendance_list, attendees):
+ for attendee in attendees:
+ choice = attendee.extras
+ if attendee.extras not in event_extras:
+ event_extras[choice] = {"type": choice, "attending": 0, "waits": 0, "allergics": []}
+ ex = event_extras[choice]
+ ex[attendance_list] += 1
+ if attendee.user.allergies:
+ what_list = "påmeldt" if attendance_list is "attending" else "venteliste"
+ ex["allergics"].append({"user": attendee.user, "list": what_list})
@login_required
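
A rough self-contained illustration of the counting bug, simplified from the code above (the real patch goes further and keys the bucket directly on attendee.extras, leaving the display name to the template): the old helper tested membership with att.extras but created and updated the bucket under choice, so the "Ikke valgt" bucket was re-created, and its counters reset, for every attendee without extras.

def count_buggy(counts, extras_list):
    for extras in extras_list:
        choice = "Ikke valgt" if extras is None else extras
        if extras not in counts:        # membership test uses one key ...
            counts[choice] = 0          # ... but the bucket is created under another
        counts[choice] += 1
    return counts

def count_fixed(counts, extras_list):
    for extras in extras_list:
        choice = "Ikke valgt" if extras is None else extras
        if choice not in counts:        # test and update agree on the key
            counts[choice] = 0
        counts[choice] += 1
    return counts

attendees = [None, None, None, "Pizza"]
print(count_buggy({}, attendees))   # {'Ikke valgt': 1, 'Pizza': 1} -- reset on every None
print(count_fixed({}, attendees))   # {'Ikke valgt': 3, 'Pizza': 1}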
|
{"golden_diff": "diff --git a/apps/events/dashboard/views.py b/apps/events/dashboard/views.py\n--- a/apps/events/dashboard/views.py\n+++ b/apps/events/dashboard/views.py\n@@ -210,16 +210,16 @@\n return render(request, 'events/dashboard/details.html', context)\n \n \n-def count_extras(arr, inlist, atts):\n- for att in atts:\n- choice = \"Ikke valgt\" if att.extras is None else att.extras\n- if att.extras not in arr:\n- arr[choice] = {\"type\": choice, \"attending\": 0, \"waits\": 0, \"allergics\": []}\n- ex = arr[choice]\n- ex[inlist] += 1\n- if att.user.allergies:\n- what_list = \"p\u00e5meldt\" if inlist is \"attending\" else \"venteliste\"\n- ex[\"allergics\"].append({\"user\": att.user, \"list\": what_list})\n+def count_extras(event_extras, attendance_list, attendees):\n+ for attendee in attendees:\n+ choice = attendee.extras\n+ if attendee.extras not in event_extras:\n+ event_extras[choice] = {\"type\": choice, \"attending\": 0, \"waits\": 0, \"allergics\": []}\n+ ex = event_extras[choice]\n+ ex[attendance_list] += 1\n+ if attendee.user.allergies:\n+ what_list = \"p\u00e5meldt\" if attendance_list is \"attending\" else \"venteliste\"\n+ ex[\"allergics\"].append({\"user\": attendee.user, \"list\": what_list})\n \n \n @login_required\n", "issue": "Unchosen extras for events not counted correctly\nSee https://online.ntnu.no/dashboard/events/265/attendees/\nThere are 3-4 people who haven't chosen any extras, but in the summary at the bottom, it sais Ikke valgt: 0\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, time, timedelta\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.forms.models import modelformset_factory\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom guardian.decorators import permission_required\n\nfrom apps.dashboard.tools import get_base_context, has_access\nfrom apps.events.dashboard.forms import (ChangeAttendanceEventForm, ChangeEventForm,\n ChangeReservationForm)\nfrom apps.events.dashboard.utils import event_ajax_handler\nfrom apps.events.models import AttendanceEvent, Attendee, Event, Reservation, Reservee\nfrom apps.events.utils import get_group_restricted_events, get_types_allowed\n\n\n@login_required\n@permission_required('events.view_event', return_403=True)\ndef index(request):\n if not has_access(request):\n raise PermissionDenied\n\n allowed_events = get_group_restricted_events(request.user, True)\n events = allowed_events.filter(event_start__gte=timezone.now().date()).order_by('event_start')\n\n context = get_base_context(request)\n context['events'] = events\n\n return render(request, 'events/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('events.view_event', return_403=True)\ndef past(request):\n if not has_access(request):\n raise PermissionDenied\n\n allowed_events = get_group_restricted_events(request.user, True)\n events = allowed_events.filter(event_start__lt=timezone.now().date()).order_by('-event_start')\n\n context = get_base_context(request)\n context['events'] = events\n\n return render(request, 'events/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('events.view_event', return_403=True)\ndef create_event(request):\n if not has_access(request):\n raise PermissionDenied\n\n 
context = get_base_context(request)\n\n if request.method == 'POST':\n form = ChangeEventForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n if cleaned['event_type'] not in get_types_allowed(request.user):\n messages.error(request, _(\n \"Du har ikke tilgang til \u00e5 lage arranngement av typen '%s'.\") % cleaned['event_type'])\n context['change_event_form'] = form\n\n else:\n # Create object, but do not commit to db. We need to add stuff.\n event = form.save(commit=False)\n # Add author\n event.author = request.user\n event.save()\n\n messages.success(request, _(\"Arrangementet ble opprettet.\"))\n return redirect('dashboard_event_details', event_id=event.id)\n\n else:\n context['change_event_form'] = form\n\n if 'change_event_form' not in context.keys():\n context['change_event_form'] = ChangeEventForm()\n\n context['event'] = _('Nytt arrangement')\n context['active_tab'] = 'details'\n\n return render(request, 'events/dashboard/details.html', context)\n\n\ndef _create_details_context(request, event_id):\n \"\"\"\n Prepare a context to be shared for all detail views.\n \"\"\"\n\n event = get_object_or_404(Event, pk=event_id)\n\n # Start with adding base context and the event itself\n context = get_base_context(request)\n context['event'] = event\n\n # Add forms\n context['change_event_form'] = ChangeEventForm(instance=event)\n if event.is_attendance_event():\n context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)\n if event.attendance_event.has_reservation:\n context['change_reservation_form'] = ChangeReservationForm(instance=event.attendance_event.reserved_seats)\n seats = event.attendance_event.reserved_seats.seats\n ReserveeFormSet = modelformset_factory(\n Reservee, max_num=seats, extra=seats, fields=['name', 'note', 'allergies'])\n context['change_reservees_formset'] = ReserveeFormSet(\n queryset=event.attendance_event.reserved_seats.reservees.all())\n\n return context\n\n\n@login_required\n@permission_required('events.view_event', return_403=True)\ndef event_details(request, event_id, active_tab='details'):\n if not has_access(request):\n raise PermissionDenied\n\n context = _create_details_context(request, event_id)\n context['active_tab'] = active_tab\n\n return render(request, 'events/dashboard/details.html', context)\n\n\n@login_required\n@permission_required('events.view_attendanceevent', return_403=True)\ndef event_change_attendance(request, event_id):\n context = _create_details_context(request, event_id)\n context['active_tab'] = 'attendance'\n\n event = context['event']\n\n if not event.is_attendance_event():\n registration_start = datetime.combine(event.event_start - timedelta(days=7), time(12, 0, 0))\n timezone.make_aware(registration_start, timezone.get_current_timezone())\n unattend_deadline = registration_start + timedelta(days=5)\n registration_end = registration_start + timedelta(days=6)\n\n attendance_event = AttendanceEvent(\n event=event,\n max_capacity=0,\n registration_start=registration_start,\n unattend_deadline=unattend_deadline,\n registration_end=registration_end\n )\n attendance_event.save()\n context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)\n\n else:\n if request.method == 'POST':\n form = ChangeAttendanceEventForm(request.POST, instance=event.attendance_event)\n if form.is_valid():\n form.save()\n messages.success(request, _(\"P\u00e5meldingsdetaljer ble lagret.\"))\n context['change_attendance_form'] = form\n\n return render(request, 
'events/dashboard/details.html', context)\n\n\n@login_required\n@permission_required('events.view_attendee', return_403=True)\ndef event_change_attendees(request, event_id, active_tab='attendees'):\n if not has_access(request):\n raise PermissionDenied\n\n context = _create_details_context(request, event_id)\n context['active_tab'] = 'attendees'\n\n event = context['event']\n\n if not event.is_attendance_event():\n messages.error(request, _(\"Dette er ikke et p\u00e5meldingsarrangement.\"))\n return redirect('dashboard_event_details_active', event_id=event.id, active_tab='details')\n\n # AJAX\n if request.method == 'POST':\n if request.is_ajax and 'action' in request.POST:\n if not event.is_attendance_event:\n return HttpResponse(_('Dette er ikke et p\u00e5meldingsarrangement.'), status=400)\n\n return JsonResponse(event_ajax_handler(event, request))\n\n # NON AJAX\n context = get_base_context(request)\n\n context['event'] = event\n context['active_tab'] = active_tab\n\n extras = {}\n if event.is_attendance_event() and event.attendance_event.extras:\n for extra in event.attendance_event.extras.all():\n extras[extra] = {\"type\": extra, \"attending\": 0, \"waits\": 0, \"allergics\": []}\n\n count_extras(extras, \"attending\", event.attendance_event.attendees_qs)\n count_extras(extras, \"waits\", event.attendance_event.waitlist_qs)\n\n context['change_event_form'] = ChangeEventForm(instance=event)\n if event.is_attendance_event():\n context['change_attendance_form'] = ChangeAttendanceEventForm(instance=event.attendance_event)\n\n context['extras'] = extras\n context['change_event_form'] = ChangeEventForm(instance=event)\n\n return render(request, 'events/dashboard/details.html', context)\n\n\ndef count_extras(arr, inlist, atts):\n for att in atts:\n choice = \"Ikke valgt\" if att.extras is None else att.extras\n if att.extras not in arr:\n arr[choice] = {\"type\": choice, \"attending\": 0, \"waits\": 0, \"allergics\": []}\n ex = arr[choice]\n ex[inlist] += 1\n if att.user.allergies:\n what_list = \"p\u00e5meldt\" if inlist is \"attending\" else \"venteliste\"\n ex[\"allergics\"].append({\"user\": att.user, \"list\": what_list})\n\n\n@login_required\n@permission_required('events.view_reservation', return_403=True)\ndef event_change_reservation(request, event_id):\n if not has_access(request):\n raise PermissionDenied\n\n context = _create_details_context(request, event_id)\n context['active_tab'] = 'reservation'\n\n event = context['event']\n\n if not event.is_attendance_event():\n messages.error(request, _(\"Dette er ikke et p\u00e5meldingsarrangement.\"))\n return redirect('dashboard_event_details_active', event_id=event.id, active_tab='details')\n\n if request.method == 'POST':\n if not event.attendance_event.has_reservation:\n reservation = Reservation(\n attendance_event=event.attendance_event,\n seats=0\n )\n reservation.save()\n context['change_reservation_form'] = ChangeReservationForm(instance=reservation)\n else:\n form = ChangeReservationForm(request.POST, instance=event.attendance_event.reserved_seats)\n if form.is_valid():\n messages.success(request, _(\"Reservasjonen ble lagret.\"))\n form.save()\n context['change_reservation_form'] = form\n\n return render(request, 'events/dashboard/details.html', context)\n\n\n@login_required\n@permission_required('events.view_attendee', return_403=True)\ndef attendee_details(request, attendee_id):\n\n context = get_base_context(request)\n\n attendee = get_object_or_404(Attendee, pk=attendee_id)\n\n context['attendee'] = attendee\n return 
render(request, 'events/dashboard/attendee.html', context)\n", "path": "apps/events/dashboard/views.py"}]}
| 3,475 | 376 |
gh_patches_debug_27110
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-3225
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Need to add the Yum updates datasource to the documentation
The [PR](https://github.com/RedHatInsights/insights-core/pull/2993/files#diff-22151ef794ba196097984a47bf24b6759c261de6dc062ac541da099084e5c50a) adding this datasource did not add the datasource to the documentation [here](https://github.com/RedHatInsights/insights-core/blob/master/docs/custom_datasources_index.rst).
</issue>
<code>
[start of insights/specs/datasources/yum_updates.py]
1 """
2 Custom datasource for collecting yum updates
3 """
4 import json
5 import time
6
7 from insights import datasource, HostContext, SkipComponent
8 from insights.components.rhel_version import IsRhel7
9 from insights.core.spec_factory import DatasourceProvider
10
11 sorted_cmp = None
12 try:
13 # cmp_to_key is not available in python 2.6, but it has sorted function which accepts cmp function
14 def sorted_cmp(it, cmp):
15 from functools import cmp_to_key
16 return sorted(it, key=cmp_to_key(cmp))
17 except ImportError:
18 sorted_cmp = sorted
19
20
21 class UpdatesManager:
22 """ Performs package resolution on yum based systems """
23 def __init__(self):
24 import yum
25
26 self.base = yum.YumBase()
27 self.base.doGenericSetup(cache=1)
28 self.releasever = self.base.conf.yumvar['releasever']
29 self.basearch = self.base.conf.yumvar['basearch']
30 self.packages = []
31 self.repos = []
32 self.updict = {}
33
34 def __enter__(self):
35 return self
36
37 def __exit__(self, *args):
38 pass
39
40 @staticmethod
41 def pkg_cmp(a, b):
42 vercmp = a.verCMP(b)
43 if vercmp != 0:
44 return vercmp
45 if a.repoid != b.repoid:
46 return -1 if a.repoid < b.repoid else 1
47 return 0
48
49 def sorted_pkgs(self, pkgs):
50 return sorted_cmp(pkgs, self.pkg_cmp)
51
52 def load(self):
53 self.base.doRepoSetup()
54 self.base.doSackSetup()
55 self.packages = self.base.pkgSack.returnPackages()
56 self.repos = self.base.repos.repos
57 self._build_updict()
58
59 def _build_updict(self):
60 self.updict = {}
61 for pkg in self.packages:
62 self.updict.setdefault(pkg.na, []).append(pkg)
63
64 def enabled_repos(self):
65 return [repo.id for repo in self.base.repos.listEnabled()]
66
67 def installed_packages(self):
68 return self.base.rpmdb.returnPackages()
69
70 def updates(self, pkg):
71 nevra = pkg.nevra
72 updates_list = []
73 for upg in self.updict[pkg.na]:
74 if upg.verGT(pkg):
75 updates_list.append(upg)
76 return nevra, updates_list
77
78 @staticmethod
79 def pkg_nevra(pkg):
80 return "{}-{}:{}-{}.{}".format(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch)
81
82 @staticmethod
83 def pkg_repo(pkg):
84 return pkg.repoid
85
86 def advisory(self, pkg):
87 adv = self.base.upinfo.get_notice(pkg.nvr)
88 if adv:
89 return adv.get_metadata()['update_id']
90 return None
91
92 @staticmethod
93 def last_update():
94 return 0
95
96
97 @datasource(HostContext, [IsRhel7])
98 def yum_updates(_broker):
99 """
100 This datasource provides a list of available updates on the system.
101 It uses the yum python library installed locally, and collects list of
102 available package updates, along with advisory info where applicable.
103 """
104
105 if not _broker.get(IsRhel7):
106 raise SkipComponent("Yum updates currently only works on RHEL 7")
107
108 with UpdatesManager() as umgr:
109 umgr.load()
110
111 response = {
112 "releasever": umgr.releasever,
113 "basearch": umgr.basearch,
114 "update_list": {},
115 }
116
117 data = {'package_list': umgr.installed_packages()}
118 updates = {}
119 for pkg in data["package_list"]:
120 (nevra, updates_list) = umgr.updates(pkg)
121 updates[nevra] = updates_list
122 for (nevra, update_list) in updates.items():
123 if update_list:
124 out_list = []
125 for pkg in umgr.sorted_pkgs(update_list):
126 pkg_dict = {
127 "package": umgr.pkg_nevra(pkg),
128 "repository": umgr.pkg_repo(pkg),
129 "basearch": response["basearch"],
130 "releasever": response["releasever"],
131 }
132 erratum = umgr.advisory(pkg)
133 if erratum:
134 pkg_dict["erratum"] = erratum
135 out_list.append(pkg_dict)
136 response["update_list"][nevra] = {"available_updates": out_list}
137
138 ts = umgr.last_update()
139 if ts:
140 response["metadata_time"] = time.strftime("%FT%TZ", time.gmtime(ts))
141 return DatasourceProvider(content=json.dumps(response), relative_path='insights_commands/yum_updates_list')
142
[end of insights/specs/datasources/yum_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py
--- a/insights/specs/datasources/yum_updates.py
+++ b/insights/specs/datasources/yum_updates.py
@@ -70,7 +70,7 @@
def updates(self, pkg):
nevra = pkg.nevra
updates_list = []
- for upg in self.updict[pkg.na]:
+ for upg in self.updict.get(pkg.na, []):
if upg.verGT(pkg):
updates_list.append(upg)
return nevra, updates_list
@@ -100,6 +100,32 @@
This datasource provides a list of available updates on the system.
It uses the yum python library installed locally, and collects list of
available package updates, along with advisory info where applicable.
+
+ Sample data returned::
+
+ {
+ "releasever": "8",
+ "basearch": "x86_64",
+ "update_list": {
+ "NetworkManager-1:1.22.8-4.el8.x86_64": {
+ "available_updates": [
+ {
+ "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64",
+ "repository": "rhel-8-for-x86_64-baseos-rpms",
+ "basearch": "x86_64",
+ "releasever": "8",
+ "erratum": "RHSA-2020:3011"
+ }
+ ]
+ }
+ },
+ "metadata_time": "2021-01-01T09:39:45Z"
+ }
+
+ Returns:
+ list: List of available updates
+ Raises:
+ SkipComponent: Raised on systems different than RHEL 7
"""
if not _broker.get(IsRhel7):
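
Beyond the documentation-oriented docstring, the patch also swaps self.updict[pkg.na] for self.updict.get(pkg.na, []). A minimal sketch with made-up data of what that guards against, namely an installed package with no update candidates in any enabled repository:

updict = {"bash.x86_64": ["bash-5.1-2.el8"]}

na = "local-only.x86_64"            # installed, but offered by no enabled repo
candidates = updict.get(na, [])     # updict[na] would raise KeyError here
print(candidates)                   # []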
|
{"golden_diff": "diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py\n--- a/insights/specs/datasources/yum_updates.py\n+++ b/insights/specs/datasources/yum_updates.py\n@@ -70,7 +70,7 @@\n def updates(self, pkg):\n nevra = pkg.nevra\n updates_list = []\n- for upg in self.updict[pkg.na]:\n+ for upg in self.updict.get(pkg.na, []):\n if upg.verGT(pkg):\n updates_list.append(upg)\n return nevra, updates_list\n@@ -100,6 +100,32 @@\n This datasource provides a list of available updates on the system.\n It uses the yum python library installed locally, and collects list of\n available package updates, along with advisory info where applicable.\n+\n+ Sample data returned::\n+\n+ {\n+ \"releasever\": \"8\",\n+ \"basearch\": \"x86_64\",\n+ \"update_list\": {\n+ \"NetworkManager-1:1.22.8-4.el8.x86_64\": {\n+ \"available_updates\": [\n+ {\n+ \"package\": \"NetworkManager-1:1.22.8-5.el8_2.x86_64\",\n+ \"repository\": \"rhel-8-for-x86_64-baseos-rpms\",\n+ \"basearch\": \"x86_64\",\n+ \"releasever\": \"8\",\n+ \"erratum\": \"RHSA-2020:3011\"\n+ }\n+ ]\n+ }\n+ },\n+ \"metadata_time\": \"2021-01-01T09:39:45Z\"\n+ }\n+\n+ Returns:\n+ list: List of available updates\n+ Raises:\n+ SkipComponent: Raised on systems different than RHEL 7\n \"\"\"\n \n if not _broker.get(IsRhel7):\n", "issue": "Need to add the Yum updates datasource to the documentation\nThe [PR](https://github.com/RedHatInsights/insights-core/pull/2993/files#diff-22151ef794ba196097984a47bf24b6759c261de6dc062ac541da099084e5c50a) adding this datasource did not add the datasource to the documentation [here](https://github.com/RedHatInsights/insights-core/blob/master/docs/custom_datasources_index.rst).\n", "before_files": [{"content": "\"\"\"\nCustom datasource for collecting yum updates\n\"\"\"\nimport json\nimport time\n\nfrom insights import datasource, HostContext, SkipComponent\nfrom insights.components.rhel_version import IsRhel7\nfrom insights.core.spec_factory import DatasourceProvider\n\nsorted_cmp = None\ntry:\n # cmp_to_key is not available in python 2.6, but it has sorted function which accepts cmp function\n def sorted_cmp(it, cmp):\n from functools import cmp_to_key\n return sorted(it, key=cmp_to_key(cmp))\nexcept ImportError:\n sorted_cmp = sorted\n\n\nclass UpdatesManager:\n \"\"\" Performs package resolution on yum based systems \"\"\"\n def __init__(self):\n import yum\n\n self.base = yum.YumBase()\n self.base.doGenericSetup(cache=1)\n self.releasever = self.base.conf.yumvar['releasever']\n self.basearch = self.base.conf.yumvar['basearch']\n self.packages = []\n self.repos = []\n self.updict = {}\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\n @staticmethod\n def pkg_cmp(a, b):\n vercmp = a.verCMP(b)\n if vercmp != 0:\n return vercmp\n if a.repoid != b.repoid:\n return -1 if a.repoid < b.repoid else 1\n return 0\n\n def sorted_pkgs(self, pkgs):\n return sorted_cmp(pkgs, self.pkg_cmp)\n\n def load(self):\n self.base.doRepoSetup()\n self.base.doSackSetup()\n self.packages = self.base.pkgSack.returnPackages()\n self.repos = self.base.repos.repos\n self._build_updict()\n\n def _build_updict(self):\n self.updict = {}\n for pkg in self.packages:\n self.updict.setdefault(pkg.na, []).append(pkg)\n\n def enabled_repos(self):\n return [repo.id for repo in self.base.repos.listEnabled()]\n\n def installed_packages(self):\n return self.base.rpmdb.returnPackages()\n\n def updates(self, pkg):\n nevra = pkg.nevra\n updates_list = []\n for upg in self.updict[pkg.na]:\n 
if upg.verGT(pkg):\n updates_list.append(upg)\n return nevra, updates_list\n\n @staticmethod\n def pkg_nevra(pkg):\n return \"{}-{}:{}-{}.{}\".format(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch)\n\n @staticmethod\n def pkg_repo(pkg):\n return pkg.repoid\n\n def advisory(self, pkg):\n adv = self.base.upinfo.get_notice(pkg.nvr)\n if adv:\n return adv.get_metadata()['update_id']\n return None\n\n @staticmethod\n def last_update():\n return 0\n\n\n@datasource(HostContext, [IsRhel7])\ndef yum_updates(_broker):\n \"\"\"\n This datasource provides a list of available updates on the system.\n It uses the yum python library installed locally, and collects list of\n available package updates, along with advisory info where applicable.\n \"\"\"\n\n if not _broker.get(IsRhel7):\n raise SkipComponent(\"Yum updates currently only works on RHEL 7\")\n\n with UpdatesManager() as umgr:\n umgr.load()\n\n response = {\n \"releasever\": umgr.releasever,\n \"basearch\": umgr.basearch,\n \"update_list\": {},\n }\n\n data = {'package_list': umgr.installed_packages()}\n updates = {}\n for pkg in data[\"package_list\"]:\n (nevra, updates_list) = umgr.updates(pkg)\n updates[nevra] = updates_list\n for (nevra, update_list) in updates.items():\n if update_list:\n out_list = []\n for pkg in umgr.sorted_pkgs(update_list):\n pkg_dict = {\n \"package\": umgr.pkg_nevra(pkg),\n \"repository\": umgr.pkg_repo(pkg),\n \"basearch\": response[\"basearch\"],\n \"releasever\": response[\"releasever\"],\n }\n erratum = umgr.advisory(pkg)\n if erratum:\n pkg_dict[\"erratum\"] = erratum\n out_list.append(pkg_dict)\n response[\"update_list\"][nevra] = {\"available_updates\": out_list}\n\n ts = umgr.last_update()\n if ts:\n response[\"metadata_time\"] = time.strftime(\"%FT%TZ\", time.gmtime(ts))\n return DatasourceProvider(content=json.dumps(response), relative_path='insights_commands/yum_updates_list')\n", "path": "insights/specs/datasources/yum_updates.py"}]}
| 2,006 | 466 |
gh_patches_debug_6740
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2915
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove CalledProcessError logs generated while creating a core3 archive
Failing commands [generate a CalledProcessError in the collection log during serialization](https://github.com/RedHatInsights/insights-core/blob/master/insights/core/serde.py#L194). Those errors are already captured in the archive metadata, and they're causing confusion for people inspecting the log for other kinds of errors. Let's not log them.
Related to https://bugzilla.redhat.com/show_bug.cgi?id=1920989.
</issue>
<code>
[start of insights/core/serde.py]
1 """
2 The serde module provides decorators that allow developers to register
3 serializer and deserializer functions for types. It also provides a
4 :py:class`Hydration` class that uses registered serde functions to save and
5 load objects from the file system. The Hydration class includes a
6 :py:func`Hydration.make_persister` method that returns a function appropriate
7 to register as an observer on a :py:class:`Broker`.
8 """
9 import json as ser
10 import logging
11 import os
12 import time
13 import traceback
14 from glob import glob
15 from functools import partial
16
17 from insights.core import dr
18 from insights.util import fs
19
20 log = logging.getLogger(__name__)
21
22 SERIALIZERS = {}
23 DESERIALIZERS = {}
24
25
26 def serializer(_type):
27 """
28 Decorator for serializers.
29
30 A serializer should accept two parameters: An object and a path which is
31 a directory on the filesystem where supplementary data can be stored. This
32 is most often useful for datasources. It should return a dictionary version
33 of the original object that contains only elements that can be serialized
34 to json.
35 """
36
37 def inner(func):
38 name = dr.get_name(_type)
39 if name in SERIALIZERS:
40 msg = "%s already has a serializer registered: %s"
41 raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))
42 SERIALIZERS[name] = func
43 return func
44 return inner
45
46
47 def deserializer(_type):
48 """
49 Decorator for deserializers.
50
51 A deserializer should accept three parameters: A type, a dictionary, and a
52 path that may contain supplementary data stored by its paired serializer.
53 If the serializer stores supplementary data, the relative path to it should
54 be somewhere in the dict of the second parameter.
55 """
56
57 def inner(func):
58 name = dr.get_name(_type)
59 if name in DESERIALIZERS:
60 msg = "%s already has a deserializer registered: %s"
61 raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))
62 DESERIALIZERS[name] = (_type, func)
63 return func
64 return inner
65
66
67 def get_serializer(obj):
68 """ Get a registered serializer for the given object.
69
70 This function walks the mro of obj looking for serializers.
71 Returns None if no valid serializer is found.
72 """
73 return SERIALIZERS.get(dr.get_name(type(obj)))
74
75
76 def get_deserializer(obj):
77 """ Returns a deserializer based on the fully qualified name string."""
78 return DESERIALIZERS.get(dr.get_name(type(obj)))
79
80
81 def serialize(obj, root=None):
82 to_dict = get_serializer(obj)
83 return {
84 "type": dr.get_name(type(obj)),
85 "object": to_dict(obj, root=root),
86 }
87
88
89 def deserialize(data, root=None):
90 try:
91 (_type, from_dict) = DESERIALIZERS.get(data["type"])
92 return from_dict(_type, data["object"], root=root)
93 except Exception:
94 raise Exception("Unrecognized type: %s" % data["type"])
95
96
97 def marshal(v, root=None, pool=None):
98 if v is None:
99 return
100 f = partial(serialize, root=root)
101 if isinstance(v, list):
102 if pool:
103 return list(pool.map(f, v))
104 else:
105 return [f(t) for t in v]
106 return f(v)
107
108
109 def unmarshal(data, root=None):
110 if data is None:
111 return
112 if isinstance(data, list):
113 return [deserialize(d, root=root) for d in data]
114 return deserialize(data, root=root)
115
116
117 class Hydration(object):
118 """
119 The Hydration class is responsible for saving and loading insights
120 components. It puts metadata about a component's evaluation in a metadata
121 file for the component and allows the serializer for a component to put raw
122 data beneath a working directory.
123 """
124 def __init__(self, root=None, meta_data="meta_data", data="data", pool=None):
125 self.root = root
126 self.meta_data = os.path.join(root, meta_data) if root else None
127 self.data = os.path.join(root, data) if root else None
128 self.ser_name = dr.get_base_module_name(ser)
129 self.created = False
130 self.pool = pool
131
132 def _hydrate_one(self, doc):
133 """ Returns (component, results, errors, duration) """
134 name = doc["name"]
135
136 key = dr.get_component_by_name(name)
137 if key is None:
138 raise ValueError("{} is not a loaded component.".format(name))
139 exec_time = doc["exec_time"]
140 ser_time = doc["ser_time"]
141 results = unmarshal(doc["results"], root=self.data)
142 return (key, results, exec_time, ser_time)
143
144 def hydrate(self, broker=None):
145 """
146 Loads a Broker from a previously saved one. A Broker is created if one
147 isn't provided.
148 """
149 broker = broker or dr.Broker()
150 for path in glob(os.path.join(self.meta_data, "*")):
151 try:
152 with open(path) as f:
153 doc = ser.load(f)
154 res = self._hydrate_one(doc)
155 comp, results, exec_time, ser_time = res
156 if results:
157 broker[comp] = results
158 broker.exec_times[comp] = exec_time + ser_time
159 except Exception as ex:
160 log.warning(ex)
161 return broker
162
163 def dehydrate(self, comp, broker):
164 """
165 Saves a component in the given broker to the file system.
166 """
167 if not self.meta_data:
168 raise Exception("Hydration meta_path not set. Can't dehydrate.")
169
170 if not self.created:
171 fs.ensure_path(self.meta_data, mode=0o770)
172 if self.data:
173 fs.ensure_path(self.data, mode=0o770)
174 self.created = True
175
176 c = comp
177 doc = None
178 try:
179 name = dr.get_name(c)
180 value = broker.get(c)
181 errors = [t for e in broker.exceptions.get(c, [])
182 for t in broker.tracebacks[e]]
183 doc = {
184 "name": name,
185 "exec_time": broker.exec_times.get(c),
186 "errors": errors
187 }
188
189 try:
190 start = time.time()
191 doc["results"] = marshal(value, root=self.data, pool=self.pool)
192 except Exception:
193 errors.append(traceback.format_exc())
194 log.debug(traceback.format_exc())
195 doc["results"] = None
196 finally:
197 doc["ser_time"] = time.time() - start
198 except Exception as ex:
199 log.exception(ex)
200 else:
201 if doc is not None and (doc["results"] or doc["errors"]):
202 try:
203 path = os.path.join(self.meta_data, name + "." + self.ser_name)
204 with open(path, "w") as f:
205 ser.dump(doc, f)
206 except Exception as boom:
207 log.error("Could not serialize %s to %s: %r" % (name, self.ser_name, boom))
208 if path:
209 fs.remove(path)
210
211 def make_persister(self, to_persist):
212 """
213 Returns a function that hydrates components as they are evaluated. The
214 function should be registered as an observer on a Broker just before
215 execution.
216
217 Args:
218 to_persist (set): Set of components to persist. Skip everything
219 else.
220 """
221
222 if not self.meta_data:
223 raise Exception("Root not set. Can't create persister.")
224
225 def persister(c, broker):
226 if c in to_persist:
227 self.dehydrate(c, broker)
228 return persister
229
[end of insights/core/serde.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/core/serde.py b/insights/core/serde.py
--- a/insights/core/serde.py
+++ b/insights/core/serde.py
@@ -191,7 +191,6 @@
doc["results"] = marshal(value, root=self.data, pool=self.pool)
except Exception:
errors.append(traceback.format_exc())
- log.debug(traceback.format_exc())
doc["results"] = None
finally:
doc["ser_time"] = time.time() - start
|
{"golden_diff": "diff --git a/insights/core/serde.py b/insights/core/serde.py\n--- a/insights/core/serde.py\n+++ b/insights/core/serde.py\n@@ -191,7 +191,6 @@\n doc[\"results\"] = marshal(value, root=self.data, pool=self.pool)\n except Exception:\n errors.append(traceback.format_exc())\n- log.debug(traceback.format_exc())\n doc[\"results\"] = None\n finally:\n doc[\"ser_time\"] = time.time() - start\n", "issue": "Remove CalledProcessError logs generated while creating a core3 archive\nFailing commands [generate a CalledProcessError in the collection log during serialization](https://github.com/RedHatInsights/insights-core/blob/master/insights/core/serde.py#L194). Those errors are already captured in the archive metadata, and they're causing confusion for people inspecting the log for other kinds of errors. Let's not log them.\r\n\r\nRelated to https://bugzilla.redhat.com/show_bug.cgi?id=1920989.\n", "before_files": [{"content": "\"\"\"\nThe serde module provides decorators that allow developers to register\nserializer and deserializer functions for types. It also provides a\n:py:class`Hydration` class that uses registered serde functions to save and\nload objects from the file system. The Hydration class includes a\n:py:func`Hydration.make_persister` method that returns a function appropriate\nto register as an observer on a :py:class:`Broker`.\n\"\"\"\nimport json as ser\nimport logging\nimport os\nimport time\nimport traceback\nfrom glob import glob\nfrom functools import partial\n\nfrom insights.core import dr\nfrom insights.util import fs\n\nlog = logging.getLogger(__name__)\n\nSERIALIZERS = {}\nDESERIALIZERS = {}\n\n\ndef serializer(_type):\n \"\"\"\n Decorator for serializers.\n\n A serializer should accept two parameters: An object and a path which is\n a directory on the filesystem where supplementary data can be stored. This\n is most often useful for datasources. 
It should return a dictionary version\n of the original object that contains only elements that can be serialized\n to json.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in SERIALIZERS:\n msg = \"%s already has a serializer registered: %s\"\n raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))\n SERIALIZERS[name] = func\n return func\n return inner\n\n\ndef deserializer(_type):\n \"\"\"\n Decorator for deserializers.\n\n A deserializer should accept three parameters: A type, a dictionary, and a\n path that may contain supplementary data stored by its paired serializer.\n If the serializer stores supplementary data, the relative path to it should\n be somewhere in the dict of the second parameter.\n \"\"\"\n\n def inner(func):\n name = dr.get_name(_type)\n if name in DESERIALIZERS:\n msg = \"%s already has a deserializer registered: %s\"\n raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))\n DESERIALIZERS[name] = (_type, func)\n return func\n return inner\n\n\ndef get_serializer(obj):\n \"\"\" Get a registered serializer for the given object.\n\n This function walks the mro of obj looking for serializers.\n Returns None if no valid serializer is found.\n \"\"\"\n return SERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef get_deserializer(obj):\n \"\"\" Returns a deserializer based on the fully qualified name string.\"\"\"\n return DESERIALIZERS.get(dr.get_name(type(obj)))\n\n\ndef serialize(obj, root=None):\n to_dict = get_serializer(obj)\n return {\n \"type\": dr.get_name(type(obj)),\n \"object\": to_dict(obj, root=root),\n }\n\n\ndef deserialize(data, root=None):\n try:\n (_type, from_dict) = DESERIALIZERS.get(data[\"type\"])\n return from_dict(_type, data[\"object\"], root=root)\n except Exception:\n raise Exception(\"Unrecognized type: %s\" % data[\"type\"])\n\n\ndef marshal(v, root=None, pool=None):\n if v is None:\n return\n f = partial(serialize, root=root)\n if isinstance(v, list):\n if pool:\n return list(pool.map(f, v))\n else:\n return [f(t) for t in v]\n return f(v)\n\n\ndef unmarshal(data, root=None):\n if data is None:\n return\n if isinstance(data, list):\n return [deserialize(d, root=root) for d in data]\n return deserialize(data, root=root)\n\n\nclass Hydration(object):\n \"\"\"\n The Hydration class is responsible for saving and loading insights\n components. It puts metadata about a component's evaluation in a metadata\n file for the component and allows the serializer for a component to put raw\n data beneath a working directory.\n \"\"\"\n def __init__(self, root=None, meta_data=\"meta_data\", data=\"data\", pool=None):\n self.root = root\n self.meta_data = os.path.join(root, meta_data) if root else None\n self.data = os.path.join(root, data) if root else None\n self.ser_name = dr.get_base_module_name(ser)\n self.created = False\n self.pool = pool\n\n def _hydrate_one(self, doc):\n \"\"\" Returns (component, results, errors, duration) \"\"\"\n name = doc[\"name\"]\n\n key = dr.get_component_by_name(name)\n if key is None:\n raise ValueError(\"{} is not a loaded component.\".format(name))\n exec_time = doc[\"exec_time\"]\n ser_time = doc[\"ser_time\"]\n results = unmarshal(doc[\"results\"], root=self.data)\n return (key, results, exec_time, ser_time)\n\n def hydrate(self, broker=None):\n \"\"\"\n Loads a Broker from a previously saved one. 
A Broker is created if one\n isn't provided.\n \"\"\"\n broker = broker or dr.Broker()\n for path in glob(os.path.join(self.meta_data, \"*\")):\n try:\n with open(path) as f:\n doc = ser.load(f)\n res = self._hydrate_one(doc)\n comp, results, exec_time, ser_time = res\n if results:\n broker[comp] = results\n broker.exec_times[comp] = exec_time + ser_time\n except Exception as ex:\n log.warning(ex)\n return broker\n\n def dehydrate(self, comp, broker):\n \"\"\"\n Saves a component in the given broker to the file system.\n \"\"\"\n if not self.meta_data:\n raise Exception(\"Hydration meta_path not set. Can't dehydrate.\")\n\n if not self.created:\n fs.ensure_path(self.meta_data, mode=0o770)\n if self.data:\n fs.ensure_path(self.data, mode=0o770)\n self.created = True\n\n c = comp\n doc = None\n try:\n name = dr.get_name(c)\n value = broker.get(c)\n errors = [t for e in broker.exceptions.get(c, [])\n for t in broker.tracebacks[e]]\n doc = {\n \"name\": name,\n \"exec_time\": broker.exec_times.get(c),\n \"errors\": errors\n }\n\n try:\n start = time.time()\n doc[\"results\"] = marshal(value, root=self.data, pool=self.pool)\n except Exception:\n errors.append(traceback.format_exc())\n log.debug(traceback.format_exc())\n doc[\"results\"] = None\n finally:\n doc[\"ser_time\"] = time.time() - start\n except Exception as ex:\n log.exception(ex)\n else:\n if doc is not None and (doc[\"results\"] or doc[\"errors\"]):\n try:\n path = os.path.join(self.meta_data, name + \".\" + self.ser_name)\n with open(path, \"w\") as f:\n ser.dump(doc, f)\n except Exception as boom:\n log.error(\"Could not serialize %s to %s: %r\" % (name, self.ser_name, boom))\n if path:\n fs.remove(path)\n\n def make_persister(self, to_persist):\n \"\"\"\n Returns a function that hydrates components as they are evaluated. The\n function should be registered as an observer on a Broker just before\n execution.\n\n Args:\n to_persist (set): Set of components to persist. Skip everything\n else.\n \"\"\"\n\n if not self.meta_data:\n raise Exception(\"Root not set. Can't create persister.\")\n\n def persister(c, broker):\n if c in to_persist:\n self.dehydrate(c, broker)\n return persister\n", "path": "insights/core/serde.py"}]}
| 2,885 | 116 |
gh_patches_debug_10322
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-2148
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On the newest docs, "Deploy a Streamlit app" page is empty
**Link to doc page in question (if any):**
https://docs.streamlit.io/en/stable/deploy_streamlit_app.html
**What you think the docs should say:**
For now, show placeholder content (link to heroku deploy instructions?)
In the future, describe what to do for S4A.
</issue>
<code>
[start of lib/streamlit/elements/file_uploader.py]
1 from streamlit import config
2
3 from streamlit.proto.FileUploader_pb2 import FileUploader as FileUploaderProto
4 from streamlit.report_thread import get_report_ctx
5 from streamlit.file_util import get_encoded_file_data
6 from streamlit.errors import StreamlitDeprecationWarning
7 from .utils import NoValue, _set_widget_id
8
9
10 class FileUploaderMixin:
11 def file_uploader(
12 dg, label, type=None, accept_multiple_files=False, key=None, **kwargs
13 ):
14 """Display a file uploader widget.
15 By default, uploaded files are limited to 200MB. You can configure
16 this using the `server.maxUploadSize` config option.
17
18 Parameters
19 ----------
20 label : str or None
21 A short label explaining to the user what this file uploader is for.
22
23 type : str or list of str or None
24 Array of allowed extensions. ['png', 'jpg']
25 The default is None, which means all extensions are allowed.
26
27 accept_multiple_files : bool
28 If True, allows the user to upload multiple files at the same time,
29 in which case the return value will be a list of files.
30 Default: False
31
32 key : str
33 An optional string to use as the unique key for the widget.
34 If this is omitted, a key will be generated for the widget
35 based on its content. Multiple widgets of the same type may
36 not share the same key.
37
38 Returns
39 -------
40 None or UploadedFile or list of UploadedFile
41 - If allow_multiple_files is False, returns either None or
42 an UploadedFile object.
43 - If allow_multiple_files is True, returns a list with the
44 uploaded files as UploadedFile objects. If no files were
45 uploaded, returns an empty list.
46 The UploadedFile class is a subclass of BytesIO, and therefore
47 it is "file-like". This means you can pass them anywhere where
48 a file is expected.
49
50 Examples
51 --------
52 Insert a file uploader that accepts a single file at a time:
53
54 >>> uploaded_file = st.file_uploader("Choose a file")
55 >>> if uploaded_file is not None:
56 ... # To read file as bytes:
57 ... bytes_data = uploaded_file.read()
58 ... st.write(bytes_data)
59 >>>
60 ... # To convert to a string based IO:
61 ... stringio = StringIO(uploaded_file.decode("utf-8"))
62 ... st.write(stringio)
63 >>>
64 ... # To read file as string:
65 ... string_data = stringio.read()
66 ... st.write(string_data)
67 >>>
68 ... # Can be used wherever a "file-like" object is accepted:
69 ... dataframe = pd.read_csv(uploaded_file)
70 ... st.write(dataframe)
71
72 Insert a file uploader that accepts multiple files at a time:
73
74 >>> uploaded_files = st.file_uploader("Choose a CSV file", accept_multiple_files=True)
75 >>> for uploaded_file in uploaded_files:
76 ... bytes_data = uploaded_file.read()
77 ... st.write("filename:", uploaded_file.name)
78 ... st.write(bytes_data)
79 """
80
81 if type:
82 if isinstance(type, str):
83 type = [type]
84
85 # May need a regex or a library to validate file types are valid
86 # extensions.
87 type = [
88 file_type if file_type[0] == "." else f".{file_type}"
89 for file_type in type
90 ]
91
92 has_encoding = "encoding" in kwargs
93 show_deprecation_warning = config.get_option(
94 "deprecation.showfileUploaderEncoding"
95 )
96
97 if show_deprecation_warning and has_encoding:
98 dg.exception(FileUploaderEncodingWarning()) # type: ignore
99
100 file_uploader_proto = FileUploaderProto()
101 file_uploader_proto.label = label
102 file_uploader_proto.type[:] = type if type is not None else []
103 file_uploader_proto.max_upload_size_mb = config.get_option(
104 "server.maxUploadSize"
105 )
106 file_uploader_proto.multiple_files = accept_multiple_files
107 _set_widget_id("file_uploader", file_uploader_proto, user_key=key)
108
109 files = None
110 ctx = get_report_ctx()
111 if ctx is not None:
112 files = ctx.uploaded_file_mgr.get_files(
113 session_id=ctx.session_id, widget_id=file_uploader_proto.id
114 )
115
116 if files is None or len(files) == 0:
117 return_value = [] if accept_multiple_files else NoValue
118 else:
119 return_value = files if accept_multiple_files else files[0]
120
121 return dg._enqueue("file_uploader", file_uploader_proto, return_value) # type: ignore
122
123
124 class FileUploaderEncodingWarning(StreamlitDeprecationWarning):
125 def __init__(self):
126 msg = self._get_message()
127 config_option = "deprecation.showfileUploaderEncoding"
128 super(FileUploaderEncodingWarning, self).__init__(
129 msg=msg, config_option=config_option
130 )
131
132 def _get_message(self):
133 return """
134 The behavior of `st.file_uploader` no longer autodetects the file's encoding.
135 This means that _all files_ will be returned as binary buffers. If you need to
136 work with a string buffer, you can convert to a StringIO by decoding the binary
137 buffer as shown below:
138
139 ```
140 file_buffer = st.file_uploader(...)
141 string_io = file_buffer.decode()
142 ```
143 """
144
[end of lib/streamlit/elements/file_uploader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/streamlit/elements/file_uploader.py b/lib/streamlit/elements/file_uploader.py
--- a/lib/streamlit/elements/file_uploader.py
+++ b/lib/streamlit/elements/file_uploader.py
@@ -43,6 +43,7 @@
- If allow_multiple_files is True, returns a list with the
uploaded files as UploadedFile objects. If no files were
uploaded, returns an empty list.
+
The UploadedFile class is a subclass of BytesIO, and therefore
it is "file-like". This means you can pass them anywhere where
a file is expected.
|
{"golden_diff": "diff --git a/lib/streamlit/elements/file_uploader.py b/lib/streamlit/elements/file_uploader.py\n--- a/lib/streamlit/elements/file_uploader.py\n+++ b/lib/streamlit/elements/file_uploader.py\n@@ -43,6 +43,7 @@\n - If allow_multiple_files is True, returns a list with the\n uploaded files as UploadedFile objects. If no files were\n uploaded, returns an empty list.\n+\n The UploadedFile class is a subclass of BytesIO, and therefore\n it is \"file-like\". This means you can pass them anywhere where\n a file is expected.\n", "issue": "On the newest docs, \"Deploy a Streamlit app\" page is empty\n**Link to doc page in question (if any):**\r\n\r\nhttps://docs.streamlit.io/en/stable/deploy_streamlit_app.html\r\n\r\n**What you think the docs should say:**\r\n\r\nFor now, show placeholder content (link to heroku deploy instructions?)\r\nIn the future, describe what to do for S4A.\r\n\n", "before_files": [{"content": "from streamlit import config\n\nfrom streamlit.proto.FileUploader_pb2 import FileUploader as FileUploaderProto\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.file_util import get_encoded_file_data\nfrom streamlit.errors import StreamlitDeprecationWarning\nfrom .utils import NoValue, _set_widget_id\n\n\nclass FileUploaderMixin:\n def file_uploader(\n dg, label, type=None, accept_multiple_files=False, key=None, **kwargs\n ):\n \"\"\"Display a file uploader widget.\n By default, uploaded files are limited to 200MB. You can configure\n this using the `server.maxUploadSize` config option.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this file uploader is for.\n\n type : str or list of str or None\n Array of allowed extensions. ['png', 'jpg']\n The default is None, which means all extensions are allowed.\n\n accept_multiple_files : bool\n If True, allows the user to upload multiple files at the same time,\n in which case the return value will be a list of files.\n Default: False\n\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n None or UploadedFile or list of UploadedFile\n - If allow_multiple_files is False, returns either None or\n an UploadedFile object.\n - If allow_multiple_files is True, returns a list with the\n uploaded files as UploadedFile objects. If no files were\n uploaded, returns an empty list.\n The UploadedFile class is a subclass of BytesIO, and therefore\n it is \"file-like\". This means you can pass them anywhere where\n a file is expected.\n\n Examples\n --------\n Insert a file uploader that accepts a single file at a time:\n\n >>> uploaded_file = st.file_uploader(\"Choose a file\")\n >>> if uploaded_file is not None:\n ... # To read file as bytes:\n ... bytes_data = uploaded_file.read()\n ... st.write(bytes_data)\n >>>\n ... # To convert to a string based IO:\n ... stringio = StringIO(uploaded_file.decode(\"utf-8\"))\n ... st.write(stringio)\n >>>\n ... # To read file as string:\n ... string_data = stringio.read()\n ... st.write(string_data)\n >>>\n ... # Can be used wherever a \"file-like\" object is accepted:\n ... dataframe = pd.read_csv(uploaded_file)\n ... st.write(dataframe)\n\n Insert a file uploader that accepts multiple files at a time:\n\n >>> uploaded_files = st.file_uploader(\"Choose a CSV file\", accept_multiple_files=True)\n >>> for uploaded_file in uploaded_files:\n ... 
bytes_data = uploaded_file.read()\n ... st.write(\"filename:\", uploaded_file.name)\n ... st.write(bytes_data)\n \"\"\"\n\n if type:\n if isinstance(type, str):\n type = [type]\n\n # May need a regex or a library to validate file types are valid\n # extensions.\n type = [\n file_type if file_type[0] == \".\" else f\".{file_type}\"\n for file_type in type\n ]\n\n has_encoding = \"encoding\" in kwargs\n show_deprecation_warning = config.get_option(\n \"deprecation.showfileUploaderEncoding\"\n )\n\n if show_deprecation_warning and has_encoding:\n dg.exception(FileUploaderEncodingWarning()) # type: ignore\n\n file_uploader_proto = FileUploaderProto()\n file_uploader_proto.label = label\n file_uploader_proto.type[:] = type if type is not None else []\n file_uploader_proto.max_upload_size_mb = config.get_option(\n \"server.maxUploadSize\"\n )\n file_uploader_proto.multiple_files = accept_multiple_files\n _set_widget_id(\"file_uploader\", file_uploader_proto, user_key=key)\n\n files = None\n ctx = get_report_ctx()\n if ctx is not None:\n files = ctx.uploaded_file_mgr.get_files(\n session_id=ctx.session_id, widget_id=file_uploader_proto.id\n )\n\n if files is None or len(files) == 0:\n return_value = [] if accept_multiple_files else NoValue\n else:\n return_value = files if accept_multiple_files else files[0]\n\n return dg._enqueue(\"file_uploader\", file_uploader_proto, return_value) # type: ignore\n\n\nclass FileUploaderEncodingWarning(StreamlitDeprecationWarning):\n def __init__(self):\n msg = self._get_message()\n config_option = \"deprecation.showfileUploaderEncoding\"\n super(FileUploaderEncodingWarning, self).__init__(\n msg=msg, config_option=config_option\n )\n\n def _get_message(self):\n return \"\"\"\nThe behavior of `st.file_uploader` no longer autodetects the file's encoding.\nThis means that _all files_ will be returned as binary buffers. If you need to\nwork with a string buffer, you can convert to a StringIO by decoding the binary\nbuffer as shown below:\n\n```\nfile_buffer = st.file_uploader(...)\nstring_io = file_buffer.decode()\n```\n \"\"\"\n", "path": "lib/streamlit/elements/file_uploader.py"}]}
| 2,117 | 134 |
gh_patches_debug_4347
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-5395
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Converting notebooks with spaces in their names to RST gives broken images
I am using `ipython nbconvert --to rst example1.ipynb` to convert my example notebooks into reStructuredText, for incorporation into my package's Sphinx documentation. This works quite well, unless the filename has a space in it. In this case, any image files from my notebooks are lost when I run Sphinx's `make html`.
My guess is that the problem seems is in the generated rst file, where the `.. image` command may need to be escaped or quoted somehow to work with spaces in the filename.
I note that a similar issue was reported and resolved for latex output in issue #3774, however the solution was specific to latex.
</issue>
<code>
[start of IPython/nbconvert/filters/markdown.py]
1 """Markdown filters
2 This file contains a collection of utility filters for dealing with
3 markdown within Jinja templates.
4 """
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib imports
19 import os
20 import subprocess
21 import warnings
22 from io import TextIOWrapper, BytesIO
23
24 # IPython imports
25 from IPython.nbconvert.utils.pandoc import pandoc
26 from IPython.nbconvert.utils.exceptions import ConversionException
27 from IPython.utils.process import get_output_error_code
28 from IPython.utils.py3compat import cast_bytes
29 from IPython.utils.version import check_version
30
31 #-----------------------------------------------------------------------------
32 # Functions
33 #-----------------------------------------------------------------------------
34 marked = os.path.join(os.path.dirname(__file__), "marked.js")
35 _node = None
36
37 __all__ = [
38 'markdown2html',
39 'markdown2html_pandoc',
40 'markdown2html_marked',
41 'markdown2latex',
42 'markdown2rst',
43 ]
44
45 class NodeJSMissing(ConversionException):
46 """Exception raised when node.js is missing."""
47 pass
48
49 def markdown2latex(source):
50 """Convert a markdown string to LaTeX via pandoc.
51
52 This function will raise an error if pandoc is not installed.
53 Any error messages generated by pandoc are printed to stderr.
54
55 Parameters
56 ----------
57 source : string
58 Input string, assumed to be valid markdown.
59
60 Returns
61 -------
62 out : string
63 Output as returned by pandoc.
64 """
65 return pandoc(source, 'markdown', 'latex')
66
67 def markdown2html(source):
68 """Convert a markdown string to HTML"""
69 global _node
70 if _node is None:
71 # prefer md2html via marked if node.js >= 0.9.12 is available
72 # node is called nodejs on debian, so try that first
73 _node = 'nodejs'
74 if not _verify_node(_node):
75 _node = 'node'
76 if not _verify_node(_node):
77 warnings.warn( "Node.js 0.9.12 or later wasn't found.\n" +
78 "Nbconvert will try to use Pandoc instead.")
79 _node = False
80 if _node:
81 return markdown2html_marked(source)
82 else:
83 return markdown2html_pandoc(source)
84
85 def markdown2html_pandoc(source):
86 """Convert a markdown string to HTML via pandoc"""
87 return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])
88
89 def markdown2html_marked(source, encoding='utf-8'):
90 """Convert a markdown string to HTML via marked"""
91 command = [_node, marked]
92 try:
93 p = subprocess.Popen(command,
94 stdin=subprocess.PIPE, stdout=subprocess.PIPE
95 )
96 except OSError as e:
97 raise NodeJSMissing(
98 "The command '%s' returned an error: %s.\n" % (" ".join(command), e) +
99 "Please check that Node.js is installed."
100 )
101 out, _ = p.communicate(cast_bytes(source, encoding))
102 out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()
103 return out.rstrip('\n')
104
105 def markdown2rst(source):
106 """Convert a markdown string to LaTeX via pandoc.
107
108 This function will raise an error if pandoc is not installed.
109 Any error messages generated by pandoc are printed to stderr.
110
111 Parameters
112 ----------
113 source : string
114 Input string, assumed to be valid markdown.
115
116 Returns
117 -------
118 out : string
119 Output as returned by pandoc.
120 """
121 return pandoc(source, 'markdown', 'rst')
122
123 def _verify_node(cmd):
124 """Verify that the node command exists and is at least the minimum supported
125 version of node.
126
127 Parameters
128 ----------
129 cmd : string
130 Node command to verify (i.e 'node')."""
131 try:
132 out, err, return_code = get_output_error_code([cmd, '--version'])
133 except OSError:
134 # Command not found
135 return False
136 if return_code:
137 # Command error
138 return False
139 return check_version(out.lstrip('v'), '0.9.12')
140
[end of IPython/nbconvert/filters/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py
--- a/IPython/nbconvert/filters/markdown.py
+++ b/IPython/nbconvert/filters/markdown.py
@@ -103,7 +103,7 @@
return out.rstrip('\n')
def markdown2rst(source):
- """Convert a markdown string to LaTeX via pandoc.
+ """Convert a markdown string to ReST via pandoc.
This function will raise an error if pandoc is not installed.
Any error messages generated by pandoc are printed to stderr.
|
{"golden_diff": "diff --git a/IPython/nbconvert/filters/markdown.py b/IPython/nbconvert/filters/markdown.py\n--- a/IPython/nbconvert/filters/markdown.py\n+++ b/IPython/nbconvert/filters/markdown.py\n@@ -103,7 +103,7 @@\n return out.rstrip('\\n')\n \n def markdown2rst(source):\n- \"\"\"Convert a markdown string to LaTeX via pandoc.\n+ \"\"\"Convert a markdown string to ReST via pandoc.\n \n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n", "issue": "Converting notebooks with spaces in their names to RST gives broken images\nI am using `ipython nbconvert --to rst example1.ipynb` to convert my example notebooks into reStructuredText, for incorporation into my package's Sphinx documentation. This works quite well, unless the filename has a space in it. In this case, any image files from my notebooks are lost when I run Sphinx's `make html`. \n\nMy guess is that the problem seems is in the generated rst file, where the `.. image` command may need to be escaped or quoted somehow to work with spaces in the filename.\n\nI note that a similar issue was reported and resolved for latex output in issue #3774, however the solution was specific to latex.\n\n", "before_files": [{"content": "\"\"\"Markdown filters\nThis file contains a collection of utility filters for dealing with \nmarkdown within Jinja templates.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib imports\nimport os\nimport subprocess\nimport warnings\nfrom io import TextIOWrapper, BytesIO\n\n# IPython imports\nfrom IPython.nbconvert.utils.pandoc import pandoc\nfrom IPython.nbconvert.utils.exceptions import ConversionException\nfrom IPython.utils.process import get_output_error_code\nfrom IPython.utils.py3compat import cast_bytes\nfrom IPython.utils.version import check_version\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\nmarked = os.path.join(os.path.dirname(__file__), \"marked.js\")\n_node = None\n\n__all__ = [\n 'markdown2html',\n 'markdown2html_pandoc',\n 'markdown2html_marked',\n 'markdown2latex',\n 'markdown2rst',\n]\n\nclass NodeJSMissing(ConversionException):\n \"\"\"Exception raised when node.js is missing.\"\"\"\n pass\n\ndef markdown2latex(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'latex')\n\ndef markdown2html(source):\n \"\"\"Convert a markdown string to HTML\"\"\"\n global _node\n if _node is None:\n # prefer md2html via marked if node.js >= 0.9.12 is available\n # node is called nodejs on debian, so try that first\n _node = 'nodejs'\n 
if not _verify_node(_node):\n _node = 'node'\n if not _verify_node(_node):\n warnings.warn( \"Node.js 0.9.12 or later wasn't found.\\n\" +\n \"Nbconvert will try to use Pandoc instead.\")\n _node = False\n if _node:\n return markdown2html_marked(source)\n else:\n return markdown2html_pandoc(source)\n\ndef markdown2html_pandoc(source):\n \"\"\"Convert a markdown string to HTML via pandoc\"\"\"\n return pandoc(source, 'markdown', 'html', extra_args=['--mathjax'])\n\ndef markdown2html_marked(source, encoding='utf-8'):\n \"\"\"Convert a markdown string to HTML via marked\"\"\"\n command = [_node, marked]\n try:\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE\n )\n except OSError as e:\n raise NodeJSMissing(\n \"The command '%s' returned an error: %s.\\n\" % (\" \".join(command), e) +\n \"Please check that Node.js is installed.\"\n )\n out, _ = p.communicate(cast_bytes(source, encoding))\n out = TextIOWrapper(BytesIO(out), encoding, 'replace').read()\n return out.rstrip('\\n')\n\ndef markdown2rst(source):\n \"\"\"Convert a markdown string to LaTeX via pandoc.\n\n This function will raise an error if pandoc is not installed.\n Any error messages generated by pandoc are printed to stderr.\n\n Parameters\n ----------\n source : string\n Input string, assumed to be valid markdown.\n\n Returns\n -------\n out : string\n Output as returned by pandoc.\n \"\"\"\n return pandoc(source, 'markdown', 'rst')\n\ndef _verify_node(cmd):\n \"\"\"Verify that the node command exists and is at least the minimum supported\n version of node.\n\n Parameters\n ----------\n cmd : string\n Node command to verify (i.e 'node').\"\"\"\n try:\n out, err, return_code = get_output_error_code([cmd, '--version'])\n except OSError:\n # Command not found\n return False\n if return_code:\n # Command error\n return False\n return check_version(out.lstrip('v'), '0.9.12')\n", "path": "IPython/nbconvert/filters/markdown.py"}]}
| 1,958 | 135 |
gh_patches_debug_16961
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-5977
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "totals" to "view feedback" page
### Description
It would be useful to add totals for each column in the "view feedback" page.
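A minimal sketch of one way to accumulate such per-column totals (the `(type_name, count, rank)` shape of each feedback entry is an assumption made for illustration):

```
from collections import defaultdict

def feedback_totals(staterank_list):
    # Sum the feedback counts per feedback type across all rows (sketch).
    totals = defaultdict(int)
    for fb_dict in staterank_list:
        for fbtype_name, fbtype_count, _ in fb_dict["feedback"]:
            totals[fbtype_name] += fbtype_count
    return totals.values()

rows = [
    {"feedback": [("comments", 2, 0), ("nominations", 1, 0)]},
    {"feedback": [("comments", 3, 0), ("nominations", 0, 0)]},
]
print(list(feedback_totals(rows)))  # [5, 1]
```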
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/nomcom/templatetags/nomcom_tags.py]
1 # Copyright The IETF Trust 2013-2019, All Rights Reserved
2 import os
3 import tempfile
4 import re
5
6 from django import template
7 from django.conf import settings
8 from django.template.defaultfilters import linebreaksbr, force_escape
9 from django.utils.encoding import force_str, DjangoUnicodeDecodeError
10 from django.utils.safestring import mark_safe
11
12 import debug # pyflakes:ignore
13
14 from ietf.nomcom.utils import get_nomcom_by_year, retrieve_nomcom_private_key
15 from ietf.person.models import Person
16 from ietf.utils.log import log
17 from ietf.utils.pipe import pipe
18
19
20 register = template.Library()
21
22
23 @register.filter
24 def is_chair_or_advisor(user, year):
25 if not user or not year:
26 return False
27 nomcom = get_nomcom_by_year(year=year)
28 return nomcom.group.has_role(user, ["chair","advisor"])
29
30
31 @register.filter
32 def has_publickey(nomcom):
33 return nomcom and nomcom.public_key and True or False
34
35 @register.filter
36 def lookup(container,key):
37 return container and container.get(key,None)
38
39 @register.filter
40 def formatted_email(address):
41 person = None
42 addrmatch = re.search('<([^>]+)>',address)
43 if addrmatch:
44 addr = addrmatch.group(1)
45 else:
46 addr = address
47 if addr:
48 persons = Person.objects.filter(email__address__in=[addr])
49 person = persons and persons[0] or None
50 if person and person.name:
51 return "%s <%s>" % (person.plain_name(), addr)
52 else:
53 return address
54
55
56 @register.simple_tag
57 def decrypt(string, request, year, plain=False):
58 try:
59 key = retrieve_nomcom_private_key(request, year)
60 except UnicodeError:
61 return f"-*- Encrypted text [Error retrieving private key, contact the secretariat ({settings.SECRETARIAT_SUPPORT_EMAIL})]"
62 if not key:
63 return '-*- Encrypted text [No private key provided] -*-'
64
65 encrypted_file = tempfile.NamedTemporaryFile(delete=False)
66 encrypted_file.write(string)
67 encrypted_file.close()
68
69 command = "%s smime -decrypt -in %s -inkey /dev/stdin"
70 code, out, error = pipe(command % (settings.OPENSSL_COMMAND,
71 encrypted_file.name), key)
72 try:
73 out = force_str(out)
74 except DjangoUnicodeDecodeError:
75 pass
76 if code != 0:
77 log("openssl error: %s:\n Error %s: %s" %(command, code, error))
78
79 os.unlink(encrypted_file.name)
80
81 if error:
82 return '-*- Encrypted text [Your private key is invalid] -*-'
83
84 if not plain:
85 return force_escape(linebreaksbr(out))
86 return mark_safe(force_escape(out))
87
[end of ietf/nomcom/templatetags/nomcom_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ietf/nomcom/templatetags/nomcom_tags.py b/ietf/nomcom/templatetags/nomcom_tags.py
--- a/ietf/nomcom/templatetags/nomcom_tags.py
+++ b/ietf/nomcom/templatetags/nomcom_tags.py
@@ -1,8 +1,10 @@
-# Copyright The IETF Trust 2013-2019, All Rights Reserved
+# Copyright The IETF Trust 2013-2023, All Rights Reserved
import os
import tempfile
import re
+from collections import defaultdict
+
from django import template
from django.conf import settings
from django.template.defaultfilters import linebreaksbr, force_escape
@@ -84,3 +86,11 @@
if not plain:
return force_escape(linebreaksbr(out))
return mark_safe(force_escape(out))
+
[email protected]
+def feedback_totals(staterank_list):
+ totals = defaultdict(lambda: 0)
+ for fb_dict in staterank_list:
+ for fbtype_name, fbtype_count, _ in fb_dict['feedback']:
+ totals[fbtype_name] += fbtype_count
+ return totals.values()
|
{"golden_diff": "diff --git a/ietf/nomcom/templatetags/nomcom_tags.py b/ietf/nomcom/templatetags/nomcom_tags.py\n--- a/ietf/nomcom/templatetags/nomcom_tags.py\n+++ b/ietf/nomcom/templatetags/nomcom_tags.py\n@@ -1,8 +1,10 @@\n-# Copyright The IETF Trust 2013-2019, All Rights Reserved\n+# Copyright The IETF Trust 2013-2023, All Rights Reserved\n import os\n import tempfile\n import re\n \n+from collections import defaultdict\n+\n from django import template\n from django.conf import settings\n from django.template.defaultfilters import linebreaksbr, force_escape\n@@ -84,3 +86,11 @@\n if not plain:\n return force_escape(linebreaksbr(out))\n return mark_safe(force_escape(out))\n+\[email protected]\n+def feedback_totals(staterank_list):\n+ totals = defaultdict(lambda: 0)\n+ for fb_dict in staterank_list:\n+ for fbtype_name, fbtype_count, _ in fb_dict['feedback']:\n+ totals[fbtype_name] += fbtype_count\n+ return totals.values()\n", "issue": "Add \"totals\" to \"view feedback\" page\n### Description\n\nIt would be useful to add totals for each column in the \"view feedback\" page.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2019, All Rights Reserved\nimport os\nimport tempfile\nimport re\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import linebreaksbr, force_escape\nfrom django.utils.encoding import force_str, DjangoUnicodeDecodeError\nfrom django.utils.safestring import mark_safe\n\nimport debug # pyflakes:ignore\n\nfrom ietf.nomcom.utils import get_nomcom_by_year, retrieve_nomcom_private_key\nfrom ietf.person.models import Person\nfrom ietf.utils.log import log\nfrom ietf.utils.pipe import pipe\n\n\nregister = template.Library()\n\n\[email protected]\ndef is_chair_or_advisor(user, year):\n if not user or not year:\n return False\n nomcom = get_nomcom_by_year(year=year)\n return nomcom.group.has_role(user, [\"chair\",\"advisor\"])\n\n\[email protected]\ndef has_publickey(nomcom):\n return nomcom and nomcom.public_key and True or False\n\[email protected]\ndef lookup(container,key):\n return container and container.get(key,None)\n\[email protected]\ndef formatted_email(address):\n person = None\n addrmatch = re.search('<([^>]+)>',address)\n if addrmatch:\n addr = addrmatch.group(1)\n else:\n addr = address\n if addr:\n persons = Person.objects.filter(email__address__in=[addr])\n person = persons and persons[0] or None\n if person and person.name:\n return \"%s <%s>\" % (person.plain_name(), addr) \n else:\n return address\n\n\[email protected]_tag\ndef decrypt(string, request, year, plain=False):\n try:\n key = retrieve_nomcom_private_key(request, year)\n except UnicodeError:\n return f\"-*- Encrypted text [Error retrieving private key, contact the secretariat ({settings.SECRETARIAT_SUPPORT_EMAIL})]\"\n if not key:\n return '-*- Encrypted text [No private key provided] -*-'\n\n encrypted_file = tempfile.NamedTemporaryFile(delete=False)\n encrypted_file.write(string)\n encrypted_file.close()\n\n command = \"%s smime -decrypt -in %s -inkey /dev/stdin\"\n code, out, error = pipe(command % (settings.OPENSSL_COMMAND,\n encrypted_file.name), key)\n try:\n out = force_str(out)\n except DjangoUnicodeDecodeError:\n pass\n if code != 0:\n log(\"openssl error: %s:\\n Error %s: %s\" %(command, code, error))\n\n os.unlink(encrypted_file.name)\n\n if error:\n return '-*- Encrypted text 
[Your private key is invalid] -*-'\n\n if not plain:\n return force_escape(linebreaksbr(out))\n return mark_safe(force_escape(out))\n", "path": "ietf/nomcom/templatetags/nomcom_tags.py"}]}
| 1,415 | 276 |
gh_patches_debug_23210
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-68
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create shortcut function for GraphQLMiddleware.make_simple_server
Getting started with Ariadne could be made even simpler by providing a shortcut function that abstracts the `GraphQLMiddleware` away on first contact, thus saving users possible confusion about what they are really doing.
</issue>
<code>
[start of ariadne/utils.py]
1 from graphql import parse
2
3
4 def gql(value: str) -> str:
5 parse(value)
6 return value
7
[end of ariadne/utils.py]
[start of ariadne/__init__.py]
1 from .executable_schema import make_executable_schema
2 from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
3 from .utils import gql
4 from .wsgi_middleware import GraphQLMiddleware
5
6 __all__ = [
7 "GraphQLMiddleware",
8 "add_resolve_functions_to_schema",
9 "default_resolver",
10 "make_executable_schema",
11 "resolve_to",
12 "gql",
13 ]
14
[end of ariadne/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ariadne/__init__.py b/ariadne/__init__.py
--- a/ariadne/__init__.py
+++ b/ariadne/__init__.py
@@ -1,6 +1,6 @@
from .executable_schema import make_executable_schema
from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to
-from .utils import gql
+from .utils import gql, start_simple_server
from .wsgi_middleware import GraphQLMiddleware
__all__ = [
@@ -10,4 +10,5 @@
"make_executable_schema",
"resolve_to",
"gql",
+ "start_simple_server",
]
diff --git a/ariadne/utils.py b/ariadne/utils.py
--- a/ariadne/utils.py
+++ b/ariadne/utils.py
@@ -1,6 +1,26 @@
+from typing import List, Union
+
from graphql import parse
+from .wsgi_middleware import GraphQLMiddleware
+
def gql(value: str) -> str:
parse(value)
return value
+
+
+def start_simple_server(
+ type_defs: Union[str, List[str]],
+ resolvers: Union[dict, List[dict]],
+ host: str = "127.0.0.1",
+ port: int = 8888,
+):
+ try:
+ print("Simple GraphQL server is running on the http://%s:%s" % (host, port))
+ graphql_server = GraphQLMiddleware.make_simple_server(
+ type_defs, resolvers, host, port
+ )
+ graphql_server.serve_forever()
+ except KeyboardInterrupt:
+ pass
|
{"golden_diff": "diff --git a/ariadne/__init__.py b/ariadne/__init__.py\n--- a/ariadne/__init__.py\n+++ b/ariadne/__init__.py\n@@ -1,6 +1,6 @@\n from .executable_schema import make_executable_schema\n from .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\n-from .utils import gql\n+from .utils import gql, start_simple_server\n from .wsgi_middleware import GraphQLMiddleware\n \n __all__ = [\n@@ -10,4 +10,5 @@\n \"make_executable_schema\",\n \"resolve_to\",\n \"gql\",\n+ \"start_simple_server\",\n ]\ndiff --git a/ariadne/utils.py b/ariadne/utils.py\n--- a/ariadne/utils.py\n+++ b/ariadne/utils.py\n@@ -1,6 +1,26 @@\n+from typing import List, Union\n+\n from graphql import parse\n \n+from .wsgi_middleware import GraphQLMiddleware\n+\n \n def gql(value: str) -> str:\n parse(value)\n return value\n+\n+\n+def start_simple_server(\n+ type_defs: Union[str, List[str]],\n+ resolvers: Union[dict, List[dict]],\n+ host: str = \"127.0.0.1\",\n+ port: int = 8888,\n+):\n+ try:\n+ print(\"Simple GraphQL server is running on the http://%s:%s\" % (host, port))\n+ graphql_server = GraphQLMiddleware.make_simple_server(\n+ type_defs, resolvers, host, port\n+ )\n+ graphql_server.serve_forever()\n+ except KeyboardInterrupt:\n+ pass\n", "issue": "Create shortcut function for GraphQLMiddleware.make_simple_server\nGetting started with Ariadne could be made even simpler by providing shortcut function abstracting the `GraphQLMiddleware` away on first contact, thus saving users possible confusion about what they really are doing.\n", "before_files": [{"content": "from graphql import parse\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n", "path": "ariadne/utils.py"}, {"content": "from .executable_schema import make_executable_schema\nfrom .resolvers import add_resolve_functions_to_schema, default_resolver, resolve_to\nfrom .utils import gql\nfrom .wsgi_middleware import GraphQLMiddleware\n\n__all__ = [\n \"GraphQLMiddleware\",\n \"add_resolve_functions_to_schema\",\n \"default_resolver\",\n \"make_executable_schema\",\n \"resolve_to\",\n \"gql\",\n]\n", "path": "ariadne/__init__.py"}]}
| 740 | 375 |
gh_patches_debug_19042
|
rasdani/github-patches
|
git_diff
|
getpelican__pelican-2753
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix log formatting of iterable objects
If the logged object is a dictionary (or any other iterable object), `BaseFormatter` expands that single argument into one argument per item of the object while attempting to prettify the message. This results in an invalid message format with an unexpected number of arguments.
```
import logging
logger = logging.getLogger(__name__)
logger.debug('my dict: %s', {'here': 'is', 'my': 'dict'})
```
Which results in the following error:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 868, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 741, in format
return fmt.format(record)
File "/usr/local/lib/python2.7/dist-packages/pelican/log.py", line 34, in format
return super(BaseFormatter, self).format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 465, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 329, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
```
introduced by https://github.com/getpelican/pelican/commit/dd76c7158f7e05b0d203818d3fe18bea26e48c3f in #2438
"Solution": try to prettify arguments only if it's a tuple: `logger.debug('my message: %s and %s', 'foo', 'bar')`
This requires review and thourugh testing.
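A minimal runnable sketch of that guard (a hypothetical formatter written for illustration, not the patch that ended up in Pelican):

```
import logging

class SafeFormatter(logging.Formatter):
    # Sketch: only rewrite record.args when it is a plain tuple of values.
    def format(self, record):
        record.msg = str(record.msg).replace('\n', '\n | ')
        if isinstance(record.args, tuple):
            record.args = tuple(arg.replace('\n', '\n | ') if
                                isinstance(arg, str) else
                                arg for arg in record.args)
        return super().format(record)

handler = logging.StreamHandler()
handler.setFormatter(SafeFormatter('%(levelname)s %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

# record.args is a dict here, so the formatter leaves it alone and
# logging's normal '%' formatting handles it without a TypeError.
logger.debug('my dict: %s', {'here': 'is', 'my': 'dict'})
```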
</issue>
<code>
[start of pelican/log.py]
1 import logging
2 import os
3 import sys
4 from collections import defaultdict
5
6 __all__ = [
7 'init'
8 ]
9
10
11 class BaseFormatter(logging.Formatter):
12 def __init__(self, fmt=None, datefmt=None):
13 FORMAT = '%(customlevelname)s %(message)s'
14 super().__init__(fmt=FORMAT, datefmt=datefmt)
15
16 def format(self, record):
17 customlevel = self._get_levelname(record.levelname)
18 record.__dict__['customlevelname'] = customlevel
19 # format multiline messages 'nicely' to make it clear they are together
20 record.msg = record.msg.replace('\n', '\n | ')
21 record.args = tuple(arg.replace('\n', '\n | ') if
22 isinstance(arg, str) else
23 arg for arg in record.args)
24 return super().format(record)
25
26 def formatException(self, ei):
27 ''' prefix traceback info for better representation '''
28 s = super().formatException(ei)
29 # fancy format traceback
30 s = '\n'.join(' | ' + line for line in s.splitlines())
31 # separate the traceback from the preceding lines
32 s = ' |___\n{}'.format(s)
33 return s
34
35 def _get_levelname(self, name):
36 ''' NOOP: overridden by subclasses '''
37 return name
38
39
40 class ANSIFormatter(BaseFormatter):
41 ANSI_CODES = {
42 'red': '\033[1;31m',
43 'yellow': '\033[1;33m',
44 'cyan': '\033[1;36m',
45 'white': '\033[1;37m',
46 'bgred': '\033[1;41m',
47 'bggrey': '\033[1;100m',
48 'reset': '\033[0;m'}
49
50 LEVEL_COLORS = {
51 'INFO': 'cyan',
52 'WARNING': 'yellow',
53 'ERROR': 'red',
54 'CRITICAL': 'bgred',
55 'DEBUG': 'bggrey'}
56
57 def _get_levelname(self, name):
58 color = self.ANSI_CODES[self.LEVEL_COLORS.get(name, 'white')]
59 if name == 'INFO':
60 fmt = '{0}->{2}'
61 else:
62 fmt = '{0}{1}{2}:'
63 return fmt.format(color, name, self.ANSI_CODES['reset'])
64
65
66 class TextFormatter(BaseFormatter):
67 """
68 Convert a `logging.LogRecord' object into text.
69 """
70
71 def _get_levelname(self, name):
72 if name == 'INFO':
73 return '->'
74 else:
75 return name + ':'
76
77
78 class LimitFilter(logging.Filter):
79 """
80 Remove duplicates records, and limit the number of records in the same
81 group.
82
83 Groups are specified by the message to use when the number of records in
84 the same group hit the limit.
85 E.g.: log.warning(('43 is not the answer', 'More erroneous answers'))
86 """
87
88 LOGS_DEDUP_MIN_LEVEL = logging.WARNING
89
90 _ignore = set()
91 _raised_messages = set()
92 _threshold = 5
93 _group_count = defaultdict(int)
94
95 def filter(self, record):
96 # don't limit log messages for anything above "warning"
97 if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:
98 return True
99
100 # extract group
101 group = record.__dict__.get('limit_msg', None)
102 group_args = record.__dict__.get('limit_args', ())
103
104 # ignore record if it was already raised
105 message_key = (record.levelno, record.getMessage())
106 if message_key in self._raised_messages:
107 return False
108 else:
109 self._raised_messages.add(message_key)
110
111 # ignore LOG_FILTER records by templates or messages
112 # when "debug" isn't enabled
113 logger_level = logging.getLogger().getEffectiveLevel()
114 if logger_level > logging.DEBUG:
115 template_key = (record.levelno, record.msg)
116 message_key = (record.levelno, record.getMessage())
117 if (template_key in self._ignore or message_key in self._ignore):
118 return False
119
120 # check if we went over threshold
121 if group:
122 key = (record.levelno, group)
123 self._group_count[key] += 1
124 if self._group_count[key] == self._threshold:
125 record.msg = group
126 record.args = group_args
127 elif self._group_count[key] > self._threshold:
128 return False
129 return True
130
131
132 class LimitLogger(logging.Logger):
133 """
134 A logger which adds LimitFilter automatically
135 """
136
137 limit_filter = LimitFilter()
138
139 def __init__(self, *args, **kwargs):
140 super().__init__(*args, **kwargs)
141 self.enable_filter()
142
143 def disable_filter(self):
144 self.removeFilter(LimitLogger.limit_filter)
145
146 def enable_filter(self):
147 self.addFilter(LimitLogger.limit_filter)
148
149
150 class FatalLogger(LimitLogger):
151 warnings_fatal = False
152 errors_fatal = False
153
154 def warning(self, *args, **kwargs):
155 super().warning(*args, **kwargs)
156 if FatalLogger.warnings_fatal:
157 raise RuntimeError('Warning encountered')
158
159 def error(self, *args, **kwargs):
160 super().error(*args, **kwargs)
161 if FatalLogger.errors_fatal:
162 raise RuntimeError('Error encountered')
163
164
165 logging.setLoggerClass(FatalLogger)
166
167
168 def supports_color():
169 """
170 Returns True if the running system's terminal supports color,
171 and False otherwise.
172
173 from django.core.management.color
174 """
175 plat = sys.platform
176 supported_platform = plat != 'Pocket PC' and \
177 (plat != 'win32' or 'ANSICON' in os.environ)
178
179 # isatty is not always implemented, #6223.
180 is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
181 if not supported_platform or not is_a_tty:
182 return False
183 return True
184
185
186 def get_formatter():
187 if supports_color():
188 return ANSIFormatter()
189 else:
190 return TextFormatter()
191
192
193 def init(level=None, fatal='', handler=logging.StreamHandler(), name=None,
194 logs_dedup_min_level=None):
195 FatalLogger.warnings_fatal = fatal.startswith('warning')
196 FatalLogger.errors_fatal = bool(fatal)
197
198 logger = logging.getLogger(name)
199
200 handler.setFormatter(get_formatter())
201 logger.addHandler(handler)
202
203 if level:
204 logger.setLevel(level)
205 if logs_dedup_min_level:
206 LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level
207
208
209 def log_warnings():
210 import warnings
211 logging.captureWarnings(True)
212 warnings.simplefilter("default", DeprecationWarning)
213 init(logging.DEBUG, name='py.warnings')
214
215
216 if __name__ == '__main__':
217 init(level=logging.DEBUG)
218
219 root_logger = logging.getLogger()
220 root_logger.debug('debug')
221 root_logger.info('info')
222 root_logger.warning('warning')
223 root_logger.error('error')
224 root_logger.critical('critical')
225
[end of pelican/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pelican/log.py b/pelican/log.py
--- a/pelican/log.py
+++ b/pelican/log.py
@@ -2,6 +2,7 @@
import os
import sys
from collections import defaultdict
+from collections.abc import Mapping
__all__ = [
'init'
@@ -18,9 +19,10 @@
record.__dict__['customlevelname'] = customlevel
# format multiline messages 'nicely' to make it clear they are together
record.msg = record.msg.replace('\n', '\n | ')
- record.args = tuple(arg.replace('\n', '\n | ') if
- isinstance(arg, str) else
- arg for arg in record.args)
+ if not isinstance(record.args, Mapping):
+ record.args = tuple(arg.replace('\n', '\n | ') if
+ isinstance(arg, str) else
+ arg for arg in record.args)
return super().format(record)
def formatException(self, ei):
|
{"golden_diff": "diff --git a/pelican/log.py b/pelican/log.py\n--- a/pelican/log.py\n+++ b/pelican/log.py\n@@ -2,6 +2,7 @@\n import os\n import sys\n from collections import defaultdict\n+from collections.abc import Mapping\n \n __all__ = [\n 'init'\n@@ -18,9 +19,10 @@\n record.__dict__['customlevelname'] = customlevel\n # format multiline messages 'nicely' to make it clear they are together\n record.msg = record.msg.replace('\\n', '\\n | ')\n- record.args = tuple(arg.replace('\\n', '\\n | ') if\n- isinstance(arg, str) else\n- arg for arg in record.args)\n+ if not isinstance(record.args, Mapping):\n+ record.args = tuple(arg.replace('\\n', '\\n | ') if\n+ isinstance(arg, str) else\n+ arg for arg in record.args)\n return super().format(record)\n \n def formatException(self, ei):\n", "issue": "fix log formatting of iterable objects\nIf logged object is a dictionary (or any other iterable object), 1 argument is extected to a number of items in the object by `BaseFormatter` in attempt to prettify a message. This would result in a invalid message format with unexpected numbers of arguments.\r\n\r\n```\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\nlogger.debug('my dict: %s', {'here': 'is', 'my': 'dict'})\r\n```\r\n\r\nWhich results in the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 868, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 741, in format\r\n return fmt.format(record)\r\n File \"/usr/local/lib/python2.7/dist-packages/pelican/log.py\", line 34, in format\r\n return super(BaseFormatter, self).format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 465, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 329, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\n```\r\n\r\nintroduce by https://github.com/getpelican/pelican/commit/dd76c7158f7e05b0d203818d3fe18bea26e48c3f in #2438\r\n\r\n\"Solution\": try to prettify arguments only if it's a tuple: `logger.debug('my message: %s and %s', 'foo', 'bar')`\r\n\r\nThis requires review and thourugh testing.\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom collections import defaultdict\n\n__all__ = [\n 'init'\n]\n\n\nclass BaseFormatter(logging.Formatter):\n def __init__(self, fmt=None, datefmt=None):\n FORMAT = '%(customlevelname)s %(message)s'\n super().__init__(fmt=FORMAT, datefmt=datefmt)\n\n def format(self, record):\n customlevel = self._get_levelname(record.levelname)\n record.__dict__['customlevelname'] = customlevel\n # format multiline messages 'nicely' to make it clear they are together\n record.msg = record.msg.replace('\\n', '\\n | ')\n record.args = tuple(arg.replace('\\n', '\\n | ') if\n isinstance(arg, str) else\n arg for arg in record.args)\n return super().format(record)\n\n def formatException(self, ei):\n ''' prefix traceback info for better representation '''\n s = super().formatException(ei)\n # fancy format traceback\n s = '\\n'.join(' | ' + line for line in s.splitlines())\n # separate the traceback from the preceding lines\n s = ' |___\\n{}'.format(s)\n return s\n\n def _get_levelname(self, name):\n ''' NOOP: overridden by subclasses '''\n return name\n\n\nclass ANSIFormatter(BaseFormatter):\n ANSI_CODES = {\n 'red': '\\033[1;31m',\n 'yellow': '\\033[1;33m',\n 'cyan': '\\033[1;36m',\n 'white': '\\033[1;37m',\n 'bgred': 
'\\033[1;41m',\n 'bggrey': '\\033[1;100m',\n 'reset': '\\033[0;m'}\n\n LEVEL_COLORS = {\n 'INFO': 'cyan',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bgred',\n 'DEBUG': 'bggrey'}\n\n def _get_levelname(self, name):\n color = self.ANSI_CODES[self.LEVEL_COLORS.get(name, 'white')]\n if name == 'INFO':\n fmt = '{0}->{2}'\n else:\n fmt = '{0}{1}{2}:'\n return fmt.format(color, name, self.ANSI_CODES['reset'])\n\n\nclass TextFormatter(BaseFormatter):\n \"\"\"\n Convert a `logging.LogRecord' object into text.\n \"\"\"\n\n def _get_levelname(self, name):\n if name == 'INFO':\n return '->'\n else:\n return name + ':'\n\n\nclass LimitFilter(logging.Filter):\n \"\"\"\n Remove duplicates records, and limit the number of records in the same\n group.\n\n Groups are specified by the message to use when the number of records in\n the same group hit the limit.\n E.g.: log.warning(('43 is not the answer', 'More erroneous answers'))\n \"\"\"\n\n LOGS_DEDUP_MIN_LEVEL = logging.WARNING\n\n _ignore = set()\n _raised_messages = set()\n _threshold = 5\n _group_count = defaultdict(int)\n\n def filter(self, record):\n # don't limit log messages for anything above \"warning\"\n if record.levelno > self.LOGS_DEDUP_MIN_LEVEL:\n return True\n\n # extract group\n group = record.__dict__.get('limit_msg', None)\n group_args = record.__dict__.get('limit_args', ())\n\n # ignore record if it was already raised\n message_key = (record.levelno, record.getMessage())\n if message_key in self._raised_messages:\n return False\n else:\n self._raised_messages.add(message_key)\n\n # ignore LOG_FILTER records by templates or messages\n # when \"debug\" isn't enabled\n logger_level = logging.getLogger().getEffectiveLevel()\n if logger_level > logging.DEBUG:\n template_key = (record.levelno, record.msg)\n message_key = (record.levelno, record.getMessage())\n if (template_key in self._ignore or message_key in self._ignore):\n return False\n\n # check if we went over threshold\n if group:\n key = (record.levelno, group)\n self._group_count[key] += 1\n if self._group_count[key] == self._threshold:\n record.msg = group\n record.args = group_args\n elif self._group_count[key] > self._threshold:\n return False\n return True\n\n\nclass LimitLogger(logging.Logger):\n \"\"\"\n A logger which adds LimitFilter automatically\n \"\"\"\n\n limit_filter = LimitFilter()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.enable_filter()\n\n def disable_filter(self):\n self.removeFilter(LimitLogger.limit_filter)\n\n def enable_filter(self):\n self.addFilter(LimitLogger.limit_filter)\n\n\nclass FatalLogger(LimitLogger):\n warnings_fatal = False\n errors_fatal = False\n\n def warning(self, *args, **kwargs):\n super().warning(*args, **kwargs)\n if FatalLogger.warnings_fatal:\n raise RuntimeError('Warning encountered')\n\n def error(self, *args, **kwargs):\n super().error(*args, **kwargs)\n if FatalLogger.errors_fatal:\n raise RuntimeError('Error encountered')\n\n\nlogging.setLoggerClass(FatalLogger)\n\n\ndef supports_color():\n \"\"\"\n Returns True if the running system's terminal supports color,\n and False otherwise.\n\n from django.core.management.color\n \"\"\"\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and \\\n (plat != 'win32' or 'ANSICON' in os.environ)\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True\n\n\ndef get_formatter():\n if 
supports_color():\n return ANSIFormatter()\n else:\n return TextFormatter()\n\n\ndef init(level=None, fatal='', handler=logging.StreamHandler(), name=None,\n logs_dedup_min_level=None):\n FatalLogger.warnings_fatal = fatal.startswith('warning')\n FatalLogger.errors_fatal = bool(fatal)\n\n logger = logging.getLogger(name)\n\n handler.setFormatter(get_formatter())\n logger.addHandler(handler)\n\n if level:\n logger.setLevel(level)\n if logs_dedup_min_level:\n LimitFilter.LOGS_DEDUP_MIN_LEVEL = logs_dedup_min_level\n\n\ndef log_warnings():\n import warnings\n logging.captureWarnings(True)\n warnings.simplefilter(\"default\", DeprecationWarning)\n init(logging.DEBUG, name='py.warnings')\n\n\nif __name__ == '__main__':\n init(level=logging.DEBUG)\n\n root_logger = logging.getLogger()\n root_logger.debug('debug')\n root_logger.info('info')\n root_logger.warning('warning')\n root_logger.error('error')\n root_logger.critical('critical')\n", "path": "pelican/log.py"}]}
| 3,030 | 226 |
gh_patches_debug_16861
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3656
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC]: Unnecessary step to reformat questions
### 📚 The doc issue
The current documentation contains an unnecessary step to reformat questions from FastChat's format to our internal format.
[tensor] fix some unittests
</issue>
<code>
[start of applications/Chat/evaluate/format_questions.py]
1 import argparse
2 import os
3 import json
4 import copy
5
6 from utils import jdump, get_json_list
7
8
9 def format_questions(args):
10 questions = get_json_list(args.questions_path)
11 keys=questions[0].keys()
12
13 formatted_questions=copy.deepcopy(questions)
14 for i in range(len(formatted_questions)):
15 formatted_questions[i]['instruction']=questions[i]['text']
16 formatted_questions[i]['input']=""
17 formatted_questions[i]['output']=""
18 formatted_questions[i]['id']=questions[i]['question_id']
19 for key in keys:
20 if key=="category":
21 continue
22 del formatted_questions[i][key]
23
24 jdump(formatted_questions, args.save_path)
25
26 if __name__ == '__main__':
27 parser = argparse.ArgumentParser()
28 parser.add_argument('--questions_path', type=str, default='table/question.jsonl')
29 parser.add_argument('--save_path', type=str, default="table/questions.json")
30 args = parser.parse_args()
31 format_questions(args)
[end of applications/Chat/evaluate/format_questions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/Chat/evaluate/format_questions.py b/applications/Chat/evaluate/format_questions.py
deleted file mode 100644
--- a/applications/Chat/evaluate/format_questions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import argparse
-import os
-import json
-import copy
-
-from utils import jdump, get_json_list
-
-
-def format_questions(args):
- questions = get_json_list(args.questions_path)
- keys=questions[0].keys()
-
- formatted_questions=copy.deepcopy(questions)
- for i in range(len(formatted_questions)):
- formatted_questions[i]['instruction']=questions[i]['text']
- formatted_questions[i]['input']=""
- formatted_questions[i]['output']=""
- formatted_questions[i]['id']=questions[i]['question_id']
- for key in keys:
- if key=="category":
- continue
- del formatted_questions[i][key]
-
- jdump(formatted_questions, args.save_path)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--questions_path', type=str, default='table/question.jsonl')
- parser.add_argument('--save_path', type=str, default="table/questions.json")
- args = parser.parse_args()
- format_questions(args)
\ No newline at end of file
|
{"golden_diff": "diff --git a/applications/Chat/evaluate/format_questions.py b/applications/Chat/evaluate/format_questions.py\ndeleted file mode 100644\n--- a/applications/Chat/evaluate/format_questions.py\n+++ /dev/null\n@@ -1,31 +0,0 @@\n-import argparse\n-import os\n-import json\n-import copy\n-\n-from utils import jdump, get_json_list\n-\n-\n-def format_questions(args):\n- questions = get_json_list(args.questions_path)\n- keys=questions[0].keys()\n- \n- formatted_questions=copy.deepcopy(questions)\n- for i in range(len(formatted_questions)):\n- formatted_questions[i]['instruction']=questions[i]['text']\n- formatted_questions[i]['input']=\"\"\n- formatted_questions[i]['output']=\"\"\n- formatted_questions[i]['id']=questions[i]['question_id']\n- for key in keys:\n- if key==\"category\":\n- continue\n- del formatted_questions[i][key]\n- \n- jdump(formatted_questions, args.save_path)\n-\n-if __name__ == '__main__':\n- parser = argparse.ArgumentParser()\n- parser.add_argument('--questions_path', type=str, default='table/question.jsonl')\n- parser.add_argument('--save_path', type=str, default=\"table/questions.json\")\n- args = parser.parse_args()\n- format_questions(args)\n\\ No newline at end of file\n", "issue": "[DOC]: Unnecessary step to reformat questions\n### \ud83d\udcda The doc issue\n\nThe current documentation contains unnecessary step to reformat questions from FastChat's format to our internal format.\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport os\nimport json\nimport copy\n\nfrom utils import jdump, get_json_list\n\n\ndef format_questions(args):\n questions = get_json_list(args.questions_path)\n keys=questions[0].keys()\n \n formatted_questions=copy.deepcopy(questions)\n for i in range(len(formatted_questions)):\n formatted_questions[i]['instruction']=questions[i]['text']\n formatted_questions[i]['input']=\"\"\n formatted_questions[i]['output']=\"\"\n formatted_questions[i]['id']=questions[i]['question_id']\n for key in keys:\n if key==\"category\":\n continue\n del formatted_questions[i][key]\n \n jdump(formatted_questions, args.save_path)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--questions_path', type=str, default='table/question.jsonl')\n parser.add_argument('--save_path', type=str, default=\"table/questions.json\")\n args = parser.parse_args()\n format_questions(args)", "path": "applications/Chat/evaluate/format_questions.py"}]}
| 855 | 298 |
gh_patches_debug_2083
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1610
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks that the given directories exist in the local filesystem.
That assumption does not hold in general, especially for any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
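For illustration, such a loader might look roughly like this (the class and method names are assumptions, not Starlite's actual protocol):
```
from importlib.resources import files  # or the importlib_resources backport

class PackageDataFS:
    """Sketch: serve static assets straight from package data."""

    def __init__(self, package: str) -> None:
        # `files()` returns a Traversable; for a zipped package there is
        # no real directory on disk for pydantic's DirectoryPath to check.
        self._root = files(package)

    def read(self, path: str) -> bytes:
        return self._root.joinpath(path).read_bytes()
```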
</issue>
<code>
[start of litestar/connection/base.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
4
5 from litestar._parsers import parse_cookie_string, parse_headers, parse_query_string
6 from litestar.datastructures.headers import Headers
7 from litestar.datastructures.multi_dicts import MultiDict
8 from litestar.datastructures.state import State
9 from litestar.datastructures.url import URL, Address, make_absolute_url
10 from litestar.exceptions import ImproperlyConfiguredException
11 from litestar.types.empty import Empty
12
13 __all__ = ("ASGIConnection", "empty_receive", "empty_send")
14
15
16 if TYPE_CHECKING:
17 from typing import NoReturn
18
19 from pydantic import BaseModel
20
21 from litestar.app import Litestar
22 from litestar.types import EmptyType
23 from litestar.types.asgi_types import Message, Receive, Scope, Send
24 from litestar.types.protocols import Logger
25
26 UserT = TypeVar("UserT")
27 AuthT = TypeVar("AuthT")
28 HandlerT = TypeVar("HandlerT")
29 StateT = TypeVar("StateT", bound=State)
30
31
32 async def empty_receive() -> NoReturn: # pragma: no cover
33 """Raise a ``RuntimeError``.
34
35 Serves as a placeholder ``send`` function.
36
37 Raises:
38 RuntimeError
39 """
40 raise RuntimeError()
41
42
43 async def empty_send(_: Message) -> NoReturn: # pragma: no cover
44 """Raise a ``RuntimeError``.
45
46 Serves as a placeholder ``send`` function.
47
48 Args:
49 _: An ASGI message
50
51 Raises:
52 RuntimeError
53 """
54 raise RuntimeError()
55
56
57 class ASGIConnection(Generic[HandlerT, UserT, AuthT, StateT]):
58 """The base ASGI connection container."""
59
60 __slots__ = ("scope", "receive", "send", "_base_url", "_url", "_parsed_query", "_headers", "_cookies")
61
62 scope: Scope
63 """The ASGI scope attached to the connection."""
64 receive: Receive
65 """The ASGI receive function."""
66 send: Send
67 """The ASGI send function."""
68
69 def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:
70 """Initialize ``ASGIConnection``.
71
72 Args:
73 scope: The ASGI connection scope.
74 receive: The ASGI receive function.
75 send: The ASGI send function.
76 """
77 self.scope = scope
78 self.receive = receive
79 self.send = send
80 self._base_url: Any = scope.get("_base_url", Empty)
81 self._url: Any = scope.get("_url", Empty)
82 self._parsed_query: Any = scope.get("_parsed_query", Empty)
83 self._cookies: Any = scope.get("_cookies", Empty)
84 self._headers: Any = scope.get("_headers", Empty)
85
86 @property
87 def app(self) -> Litestar:
88 """Return the ``app`` for this connection.
89
90 Returns:
91 The :class:`Litestar <litestar.app.Litestar>` application instance
92 """
93 return self.scope["app"]
94
95 @property
96 def route_handler(self) -> HandlerT:
97 """Return the ``route_handler`` for this connection.
98
99 Returns:
100 The target route handler instance.
101 """
102 return cast("HandlerT", self.scope["route_handler"])
103
104 @property
105 def state(self) -> StateT:
106 """Return the ``State`` of this connection.
107
108 Returns:
109 A State instance constructed from the scope["state"] value.
110 """
111 return cast("StateT", State(self.scope["state"]))
112
113 @property
114 def url(self) -> URL:
115 """Return the URL of this connection's ``Scope``.
116
117 Returns:
118 A URL instance constructed from the request's scope.
119 """
120 if self._url is Empty:
121 self._url = self.scope["_url"] = URL.from_scope(self.scope) # type: ignore[typeddict-unknown-key]
122
123 return cast("URL", self._url)
124
125 @property
126 def base_url(self) -> URL:
127 """Return the base URL of this connection's ``Scope``.
128
129 Returns:
130 A URL instance constructed from the request's scope, representing only the base part
131 (host + domain + prefix) of the request.
132 """
133 if self._base_url is Empty:
134 scope = {
135 **self.scope,
136 "path": "/",
137 "query_string": b"",
138 "root_path": self.scope.get("app_root_path") or self.scope.get("root_path", ""),
139 }
140 self._base_url = self.scope["_base_url"] = URL.from_scope(cast("Scope", scope)) # type: ignore[typeddict-unknown-key]
141
142 return cast("URL", self._base_url)
143
144 @property
145 def headers(self) -> Headers:
146 """Return the headers of this connection's ``Scope``.
147
148 Returns:
149 A Headers instance with the request's scope["headers"] value.
150 """
151 if self._headers is Empty:
152 self.scope.setdefault("headers", [])
153 self._headers = self.scope["_headers"] = parse_headers(tuple(self.scope["headers"])) # type: ignore[typeddict-unknown-key]
154
155 return Headers(self._headers)
156
157 @property
158 def query_params(self) -> MultiDict:
159 """Return the query parameters of this connection's ``Scope``.
160
161 Returns:
162 A normalized dict of query parameters. Multiple values for the same key are returned as a list.
163 """
164 if self._parsed_query is Empty:
165 self._parsed_query = self.scope["_parsed_query"] = parse_query_string(self.scope.get("query_string", b"")) # type: ignore
166
167 return MultiDict(self._parsed_query)
168
169 @property
170 def path_params(self) -> dict[str, Any]:
171 """Return the ``path_params`` of this connection's ``Scope``.
172
173 Returns:
174 A string keyed dictionary of path parameter values.
175 """
176 return self.scope["path_params"]
177
178 @property
179 def cookies(self) -> dict[str, str]:
180 """Return the ``cookies`` of this connection's ``Scope``.
181
182 Returns:
183 Returns any cookies stored in the header as a parsed dictionary.
184 """
185 if self._cookies is Empty:
186 cookies: dict[str, str] = {}
187 cookie_header = self.headers.get("cookie")
188
189 if cookie_header:
190 cookies = parse_cookie_string(cookie_header)
191
192 self._cookies = self.scope["_cookies"] = cookies # type: ignore[typeddict-unknown-key]
193
194 return cast("dict[str, str]", self._cookies)
195
196 @property
197 def client(self) -> Address | None:
198 """Return the ``client`` data of this connection's ``Scope``.
199
200 Returns:
201 A two tuple of the host name and port number.
202 """
203 client = self.scope.get("client")
204 return Address(*client) if client else None
205
206 @property
207 def auth(self) -> AuthT:
208 """Return the ``auth`` data of this connection's ``Scope``.
209
210 Raises:
211 ImproperlyConfiguredException: If ``auth`` is not set in scope via an ``AuthMiddleware``, raises an exception
212
213 Returns:
214 A type correlating to the generic variable Auth.
215 """
216 if "auth" not in self.scope:
217 raise ImproperlyConfiguredException("'auth' is not defined in scope, install an AuthMiddleware to set it")
218
219 return cast("AuthT", self.scope["auth"])
220
221 @property
222 def user(self) -> UserT:
223 """Return the ``user`` data of this connection's ``Scope``.
224
225 Raises:
226 ImproperlyConfiguredException: If ``user`` is not set in scope via an ``AuthMiddleware``, raises an exception
227
228 Returns:
229 A type correlating to the generic variable User.
230 """
231 if "user" not in self.scope:
232 raise ImproperlyConfiguredException("'user' is not defined in scope, install an AuthMiddleware to set it")
233
234 return cast("UserT", self.scope["user"])
235
236 @property
237 def session(self) -> dict[str, Any]:
238 """Return the session for this connection if a session was previously set in the ``Scope``
239
240 Returns:
241 A dictionary representing the session value - if existing.
242
243 Raises:
244 ImproperlyConfiguredException: if session is not set in scope.
245 """
246 if "session" not in self.scope:
247 raise ImproperlyConfiguredException(
248 "'session' is not defined in scope, install a SessionMiddleware to set it"
249 )
250
251 return cast("dict[str, Any]", self.scope["session"])
252
253 @property
254 def logger(self) -> Logger:
255 """Return the ``Logger`` instance for this connection.
256
257 Returns:
258 A ``Logger`` instance.
259
260 Raises:
261 ImproperlyConfiguredException: if ``log_config`` has not been passed to the Litestar constructor.
262 """
263 return self.app.get_logger()
264
265 def set_session(self, value: dict[str, Any] | BaseModel | EmptyType) -> None:
266 """Set the session in the connection's ``Scope``.
267
268 If the :class:`SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, the session will be added
269 to the response as a cookie header.
270
271 Args:
272 value: Dictionary or pydantic model instance for the session data.
273
274 Returns:
275 None.
276 """
277 self.scope["session"] = value
278
279 def clear_session(self) -> None:
280 """Remove the session from the connection's ``Scope``.
281
282 If the :class:`Litestar SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, this will cause
283 the session data to be cleared.
284
285 Returns:
286 None.
287 """
288 self.scope["session"] = Empty
289
290 def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str:
291 """Return the url for a given route handler name.
292
293 Args:
294 name: The ``name`` of the request route handler.
295 **path_parameters: Values for path parameters in the route
296
297 Raises:
298 NoRouteMatchFoundException: If route with ``name`` does not exist, path parameters are missing or have a
299 wrong type.
300
301 Returns:
302 A string representing the absolute url of the route handler.
303 """
304 litestar_instance = self.scope["app"]
305 url_path = litestar_instance.route_reverse(name, **path_parameters)
306
307 return make_absolute_url(url_path, self.base_url)
308
309 def url_for_static_asset(self, name: str, file_path: str) -> str:
310 """Receives a static files handler name, an asset file path and returns resolved absolute url to the asset.
311
312 Args:
313 name: A static handler unique name.
314 file_path: a string containing path to an asset.
315
316 Raises:
317 NoRouteMatchFoundException: If static files handler with ``name`` does not exist.
318
319 Returns:
320 A string representing absolute url to the asset.
321 """
322 litestar_instance = self.scope["app"]
323 url_path = litestar_instance.url_for_static_asset(name, file_path)
324
325 return make_absolute_url(url_path, self.base_url)
326
[end of litestar/connection/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/litestar/connection/base.py b/litestar/connection/base.py
--- a/litestar/connection/base.py
+++ b/litestar/connection/base.py
@@ -287,7 +287,7 @@
"""
self.scope["session"] = Empty
- def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str:
+ def url_for(self, name: str, **path_parameters: Any) -> str:
"""Return the url for a given route handler name.
Args:
|
{"golden_diff": "diff --git a/litestar/connection/base.py b/litestar/connection/base.py\n--- a/litestar/connection/base.py\n+++ b/litestar/connection/base.py\n@@ -287,7 +287,7 @@\n \"\"\"\n self.scope[\"session\"] = Empty\n \n- def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str:\n+ def url_for(self, name: str, **path_parameters: Any) -> str:\n \"\"\"Return the url for a given route handler name.\n \n Args:\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar, cast\n\nfrom litestar._parsers import parse_cookie_string, parse_headers, parse_query_string\nfrom litestar.datastructures.headers import Headers\nfrom litestar.datastructures.multi_dicts import MultiDict\nfrom litestar.datastructures.state import State\nfrom litestar.datastructures.url import URL, Address, make_absolute_url\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.types.empty import Empty\n\n__all__ = (\"ASGIConnection\", \"empty_receive\", \"empty_send\")\n\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\n from pydantic import BaseModel\n\n from litestar.app import Litestar\n from litestar.types import EmptyType\n from litestar.types.asgi_types import Message, Receive, Scope, Send\n from litestar.types.protocols import Logger\n\nUserT = TypeVar(\"UserT\")\nAuthT = TypeVar(\"AuthT\")\nHandlerT = TypeVar(\"HandlerT\")\nStateT = TypeVar(\"StateT\", bound=State)\n\n\nasync def empty_receive() -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nasync def empty_send(_: Message) -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Args:\n _: An ASGI message\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nclass ASGIConnection(Generic[HandlerT, UserT, AuthT, StateT]):\n \"\"\"The base ASGI connection container.\"\"\"\n\n __slots__ = (\"scope\", \"receive\", \"send\", \"_base_url\", \"_url\", \"_parsed_query\", \"_headers\", \"_cookies\")\n\n scope: Scope\n \"\"\"The ASGI scope attached to the connection.\"\"\"\n receive: Receive\n \"\"\"The ASGI receive function.\"\"\"\n send: Send\n \"\"\"The ASGI send function.\"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:\n \"\"\"Initialize ``ASGIConnection``.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n \"\"\"\n self.scope = scope\n self.receive = receive\n self.send = send\n self._base_url: Any = scope.get(\"_base_url\", Empty)\n self._url: Any = scope.get(\"_url\", Empty)\n self._parsed_query: Any = scope.get(\"_parsed_query\", Empty)\n self._cookies: Any = 
scope.get(\"_cookies\", Empty)\n self._headers: Any = scope.get(\"_headers\", Empty)\n\n @property\n def app(self) -> Litestar:\n \"\"\"Return the ``app`` for this connection.\n\n Returns:\n The :class:`Litestar <litestar.app.Litestar>` application instance\n \"\"\"\n return self.scope[\"app\"]\n\n @property\n def route_handler(self) -> HandlerT:\n \"\"\"Return the ``route_handler`` for this connection.\n\n Returns:\n The target route handler instance.\n \"\"\"\n return cast(\"HandlerT\", self.scope[\"route_handler\"])\n\n @property\n def state(self) -> StateT:\n \"\"\"Return the ``State`` of this connection.\n\n Returns:\n A State instance constructed from the scope[\"state\"] value.\n \"\"\"\n return cast(\"StateT\", State(self.scope[\"state\"]))\n\n @property\n def url(self) -> URL:\n \"\"\"Return the URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope.\n \"\"\"\n if self._url is Empty:\n self._url = self.scope[\"_url\"] = URL.from_scope(self.scope) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._url)\n\n @property\n def base_url(self) -> URL:\n \"\"\"Return the base URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope, representing only the base part\n (host + domain + prefix) of the request.\n \"\"\"\n if self._base_url is Empty:\n scope = {\n **self.scope,\n \"path\": \"/\",\n \"query_string\": b\"\",\n \"root_path\": self.scope.get(\"app_root_path\") or self.scope.get(\"root_path\", \"\"),\n }\n self._base_url = self.scope[\"_base_url\"] = URL.from_scope(cast(\"Scope\", scope)) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._base_url)\n\n @property\n def headers(self) -> Headers:\n \"\"\"Return the headers of this connection's ``Scope``.\n\n Returns:\n A Headers instance with the request's scope[\"headers\"] value.\n \"\"\"\n if self._headers is Empty:\n self.scope.setdefault(\"headers\", [])\n self._headers = self.scope[\"_headers\"] = parse_headers(tuple(self.scope[\"headers\"])) # type: ignore[typeddict-unknown-key]\n\n return Headers(self._headers)\n\n @property\n def query_params(self) -> MultiDict:\n \"\"\"Return the query parameters of this connection's ``Scope``.\n\n Returns:\n A normalized dict of query parameters. 
Multiple values for the same key are returned as a list.\n \"\"\"\n if self._parsed_query is Empty:\n self._parsed_query = self.scope[\"_parsed_query\"] = parse_query_string(self.scope.get(\"query_string\", b\"\")) # type: ignore\n\n return MultiDict(self._parsed_query)\n\n @property\n def path_params(self) -> dict[str, Any]:\n \"\"\"Return the ``path_params`` of this connection's ``Scope``.\n\n Returns:\n A string keyed dictionary of path parameter values.\n \"\"\"\n return self.scope[\"path_params\"]\n\n @property\n def cookies(self) -> dict[str, str]:\n \"\"\"Return the ``cookies`` of this connection's ``Scope``.\n\n Returns:\n Returns any cookies stored in the header as a parsed dictionary.\n \"\"\"\n if self._cookies is Empty:\n cookies: dict[str, str] = {}\n cookie_header = self.headers.get(\"cookie\")\n\n if cookie_header:\n cookies = parse_cookie_string(cookie_header)\n\n self._cookies = self.scope[\"_cookies\"] = cookies # type: ignore[typeddict-unknown-key]\n\n return cast(\"dict[str, str]\", self._cookies)\n\n @property\n def client(self) -> Address | None:\n \"\"\"Return the ``client`` data of this connection's ``Scope``.\n\n Returns:\n A two tuple of the host name and port number.\n \"\"\"\n client = self.scope.get(\"client\")\n return Address(*client) if client else None\n\n @property\n def auth(self) -> AuthT:\n \"\"\"Return the ``auth`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``auth`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable Auth.\n \"\"\"\n if \"auth\" not in self.scope:\n raise ImproperlyConfiguredException(\"'auth' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"AuthT\", self.scope[\"auth\"])\n\n @property\n def user(self) -> UserT:\n \"\"\"Return the ``user`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``user`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable User.\n \"\"\"\n if \"user\" not in self.scope:\n raise ImproperlyConfiguredException(\"'user' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"UserT\", self.scope[\"user\"])\n\n @property\n def session(self) -> dict[str, Any]:\n \"\"\"Return the session for this connection if a session was previously set in the ``Scope``\n\n Returns:\n A dictionary representing the session value - if existing.\n\n Raises:\n ImproperlyConfiguredException: if session is not set in scope.\n \"\"\"\n if \"session\" not in self.scope:\n raise ImproperlyConfiguredException(\n \"'session' is not defined in scope, install a SessionMiddleware to set it\"\n )\n\n return cast(\"dict[str, Any]\", self.scope[\"session\"])\n\n @property\n def logger(self) -> Logger:\n \"\"\"Return the ``Logger`` instance for this connection.\n\n Returns:\n A ``Logger`` instance.\n\n Raises:\n ImproperlyConfiguredException: if ``log_config`` has not been passed to the Litestar constructor.\n \"\"\"\n return self.app.get_logger()\n\n def set_session(self, value: dict[str, Any] | BaseModel | EmptyType) -> None:\n \"\"\"Set the session in the connection's ``Scope``.\n\n If the :class:`SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, the session will be added\n to the response as a cookie header.\n\n Args:\n value: Dictionary or pydantic model instance for the session data.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = 
value\n\n def clear_session(self) -> None:\n \"\"\"Remove the session from the connection's ``Scope``.\n\n If the :class:`Litestar SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, this will cause\n the session data to be cleared.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = Empty\n\n def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str:\n \"\"\"Return the url for a given route handler name.\n\n Args:\n name: The ``name`` of the request route handler.\n **path_parameters: Values for path parameters in the route\n\n Raises:\n NoRouteMatchFoundException: If route with ``name`` does not exist, path parameters are missing or have a\n wrong type.\n\n Returns:\n A string representing the absolute url of the route handler.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.route_reverse(name, **path_parameters)\n\n return make_absolute_url(url_path, self.base_url)\n\n def url_for_static_asset(self, name: str, file_path: str) -> str:\n \"\"\"Receives a static files handler name, an asset file path and returns resolved absolute url to the asset.\n\n Args:\n name: A static handler unique name.\n file_path: a string containing path to an asset.\n\n Raises:\n NoRouteMatchFoundException: If static files handler with ``name`` does not exist.\n\n Returns:\n A string representing absolute url to the asset.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.url_for_static_asset(name, file_path)\n\n return make_absolute_url(url_path, self.base_url)\n", "path": "litestar/connection/base.py"}]}
| 4,013 | 120 |
gh_patches_debug_1032
|
rasdani/github-patches
|
git_diff
|
angr__angr-2256
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The version requirement CFFI>=1.7.0 may not be correct
<!--
*Disclaimer:
The angr suite is maintained by a small team of volunteers.
While we cannot guarantee any timeliness for fixes and enhancements, we will do our best.
For more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)*
-->
---
**Describe the bug.**
<!--
Please include a clear and concise description of what the bug is.
-->
I encountered an error like this:
```
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/angr/project.py", line 131, in __init__
self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)
File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 133, in __init__
self.initial_load_objects = self._internal_load(main_binary, *preload_libs, *force_load_libs, preloading=(main_binary, *preload_libs))
File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 652, in _internal_load
obj = self._load_object_isolated(main_spec)
File "/usr/local/lib/python3.6/dist-packages/cle/loader.py", line 832, in _load_object_isolated
result = backend_cls(binary, binary_stream, is_main_bin=self.main_object is None, loader=self, **options)
File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/elf.py", line 152, in __init__
self._load_plt()
File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 90, in _load_plt
sanity_check=not self.pic)
File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 49, in _add_plt_stub
if sanity_check and target_addr not in [c.value for c in self._block(addr, skip_stmts=False).all_constants]:
File "/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py", line 42, in _block
return pyvex.IRSB(dat, addr, self.arch, bytes_offset=1 if thumb else 0, opt_level=1, skip_stmts=skip_stmts)
File "/usr/local/lib/python3.6/dist-packages/pyvex/block.py", line 115, in __init__
cross_insn_opt=cross_insn_opt,
File "/usr/local/lib/python3.6/dist-packages/pyvex/lifting/__init__.py", line 83, in lift
u_data = ffi.from_buffer(ffi.BVoidP, py_data + b'\0' * 8 if type(py_data) is bytes else py_data)
File "/home/ling/.local/lib/python3.6/site-packages/cffi/api.py", line 362, in from_buffer
require_writable)
TypeError: expected an array ctype, got 'void *'
```
I solved it by upgrading cffi from 1.12.2 to 1.14.0.
In angr's setup.py, however, cffi is only required to be >=1.7.0.
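For anyone hitting the same traceback, a quick sanity check of the cffi version actually being imported (sketch):
```
import cffi
print(cffi.__version__)  # 1.12.2 reproduced the error here, 1.14.0 did not
```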
**Environment Information.**
<!--
Many common issues are caused by problems with the local Python environment.
Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date.
Please include the output of `python -m angr.misc.bug_report` here.
-->
**To Reproduce.**
<!--
Please include *both a script to reproduce the crash, and attach the binary used, if possible*
-->
angr is v8.20.7.6
**Additional context.**
<!--
Add any other context about the problem here.
-->
</issue>
<code>
[start of setup.py]
1 # pylint: disable=no-name-in-module,import-error,unused-variable
2 import os
3 import sys
4 import subprocess
5 import pkg_resources
6 import shutil
7 import platform
8 import glob
9
10 if bytes is str:
11 raise Exception("""
12
13 =-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=
14
15 angr has transitioned to python 3. Due to the small size of the team behind it,
16 we can't reasonably maintain compatibility between both python 2 and python 3.
17 If you want to continue using the most recent version of angr (you definitely
18 want that, trust us) you should upgrade to python 3. It's like getting your
19 vaccinations. It hurts a little bit initially but in the end it's worth it.
20
21 If you are staying on python 2 and would like to make sure you don't get
22 incompatible versions, make sure your pip is at least version 9.0, and it will
23 use our metadata to implicitly avoid them.
24
25 For more information, see here: https://docs.angr.io/appendix/migration
26
27 Good luck!
28 """)
29
30 try:
31 from setuptools import setup
32 from setuptools import find_packages
33 packages = find_packages()
34 except ImportError:
35 from distutils.core import setup
36 packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
37
38 from distutils.util import get_platform
39 from distutils.errors import LibError
40 from distutils.command.build import build as _build
41 from distutils.command.clean import clean as _clean
42
43 if sys.platform == 'darwin':
44 library_file = "angr_native.dylib"
45 elif sys.platform in ('win32', 'cygwin'):
46 library_file = "angr_native.dll"
47 else:
48 library_file = "angr_native.so"
49
50 def _build_native():
51 try:
52 import unicorn
53 import pyvex
54 except ImportError:
55 raise LibError("You must install unicorn and pyvex before building angr")
56
57 env = os.environ.copy()
58 env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),
59 ('UNICORN_LIB_PATH', 'unicorn', 'lib'),
60 ('UNICORN_LIB_FILE', 'unicorn', 'lib\\unicorn.lib'),
61 ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),
62 ('PYVEX_LIB_PATH', 'pyvex', 'lib'),
63 ('PYVEX_LIB_FILE', 'pyvex', 'lib\\pyvex.lib'))
64 for var, pkg, fnm in env_data:
65 try:
66 env[var] = pkg_resources.resource_filename(pkg, fnm)
67 except KeyError:
68 pass
69
70 cmd1 = ['nmake', '/f', 'Makefile-win']
71 cmd2 = ['make']
72 for cmd in (cmd1, cmd2):
73 try:
74 if subprocess.call(cmd, cwd='native', env=env) != 0:
75 raise LibError('Unable to build angr_native')
76 break
77 except OSError:
78 continue
79 else:
80 raise LibError('Unable to build angr_native')
81
82 shutil.rmtree('angr/lib', ignore_errors=True)
83 os.mkdir('angr/lib')
84 shutil.copy(os.path.join('native', library_file), 'angr/lib')
85
86 def _clean_native():
87 oglob = glob.glob('native/*.o')
88 oglob += glob.glob('native/*.obj')
89 oglob += glob.glob('native/*.so')
90 oglob += glob.glob('native/*.dll')
91 oglob += glob.glob('native/*.dylib')
92 for fname in oglob:
93 os.unlink(fname)
94
95 class build(_build):
96 def run(self, *args):
97 self.execute(_build_native, (), msg='Building angr_native')
98 _build.run(self, *args)
99
100 class clean(_clean):
101 def run(self, *args):
102 self.execute(_clean_native, (), msg='Cleaning angr_native')
103 _clean.run(self, *args)
104
105 cmdclass = {
106 'build': build,
107 'clean': clean,
108 }
109
110 try:
111 from setuptools.command.develop import develop as _develop
112 class develop(_develop):
113 def run(self, *args):
114 self.execute(_build_native, (), msg='Building angr_native')
115 _develop.run(self, *args)
116
117 cmdclass['develop'] = develop
118 except ImportError:
119 pass
120
121 if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
122 sys.argv.append('--plat-name')
123 name = get_platform()
124 if 'linux' in name:
125 # linux_* platform tags are disallowed because the python ecosystem is fubar
126 # linux builds should be built in the centos 5 vm for maximum compatibility
127 sys.argv.append('manylinux1_' + platform.machine())
128 else:
129 # https://www.python.org/dev/peps/pep-0425/
130 sys.argv.append(name.replace('.', '_').replace('-', '_'))
131
132 _UNICORN = "unicorn>=1.0.2rc2"
133
134 setup(
135 name='angr',
136 version='8.20.7.6',
137 python_requires='>=3.6',
138 description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',
139 url='https://github.com/angr/angr',
140 packages=packages,
141 install_requires=[
142 'sortedcontainers',
143 'cachetools',
144 'capstone>=3.0.5rc2',
145 'dpkt',
146 'mulpyplexer',
147 'networkx>=2.0',
148 'progressbar2',
149 'rpyc',
150 'cffi>=1.7.0',
151 _UNICORN,
152 'archinfo==8.20.7.6',
153 'claripy==8.20.7.6',
154 'cle==8.20.7.6',
155 'pyvex==8.20.7.6',
156 'ailment==8.20.7.6',
157 'GitPython',
158 'psutil',
159 'pycparser>=2.18',
160 'itanium_demangler',
161 'CppHeaderParser',
162 'protobuf',
163 ],
164 setup_requires=[_UNICORN, 'pyvex'],
165 extras_require={
166 'AngrDB': ['sqlalchemy'],
167 },
168 cmdclass=cmdclass,
169 include_package_data=True,
170 package_data={
171 'angr': ['lib/*', "py.typed"]
172 }
173 )
174
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -147,7 +147,7 @@
'networkx>=2.0',
'progressbar2',
'rpyc',
- 'cffi>=1.7.0',
+ 'cffi>=1.14.0',
_UNICORN,
'archinfo==8.20.7.6',
'claripy==8.20.7.6',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -147,7 +147,7 @@\n 'networkx>=2.0',\n 'progressbar2',\n 'rpyc',\n- 'cffi>=1.7.0',\n+ 'cffi>=1.14.0',\n _UNICORN,\n 'archinfo==8.20.7.6',\n 'claripy==8.20.7.6',\n", "issue": "The version of CFFI>=1.7.0 maybe not correct\n<!--\r\n*Disclaimer:\r\nThe angr suite is maintained by a small team of volunteers.\r\nWhile we cannot guarantee any timeliness for fixes and enhancements, we will do our best.\r\nFor more real-time help with angr, from us and the community, join our [Slack.](http://angr.io/invite/)*\r\n-->\r\n---\r\n\r\n**Describe the bug.**\r\n<!--\r\nPlease include a clear and concise description of what the bug is.\r\n-->\r\nI encounter a error, like this:\r\n```\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.6/dist-packages/angr/project.py\", line 131, in __init__\r\n self.loader = cle.Loader(self.filename, concrete_target=concrete_target, **load_options)\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/loader.py\", line 133, in __init__\r\n self.initial_load_objects = self._internal_load(main_binary, *preload_libs, *force_load_libs, preloading=(main_binary, *preload_libs))\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/loader.py\", line 652, in _internal_load\r\n obj = self._load_object_isolated(main_spec)\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/loader.py\", line 832, in _load_object_isolated\r\n result = backend_cls(binary, binary_stream, is_main_bin=self.main_object is None, loader=self, **options)\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/backends/elf/elf.py\", line 152, in __init__\r\n self._load_plt()\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py\", line 90, in _load_plt\r\n sanity_check=not self.pic)\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py\", line 49, in _add_plt_stub\r\n if sanity_check and target_addr not in [c.value for c in self._block(addr, skip_stmts=False).all_constants]:\r\n File \"/usr/local/lib/python3.6/dist-packages/cle/backends/elf/metaelf.py\", line 42, in _block\r\n return pyvex.IRSB(dat, addr, self.arch, bytes_offset=1 if thumb else 0, opt_level=1, skip_stmts=skip_stmts)\r\n File \"/usr/local/lib/python3.6/dist-packages/pyvex/block.py\", line 115, in __init__\r\n cross_insn_opt=cross_insn_opt,\r\n File \"/usr/local/lib/python3.6/dist-packages/pyvex/lifting/__init__.py\", line 83, in lift\r\n u_data = ffi.from_buffer(ffi.BVoidP, py_data + b'\\0' * 8 if type(py_data) is bytes else py_data)\r\n File \"/home/ling/.local/lib/python3.6/site-packages/cffi/api.py\", line 362, in from_buffer\r\n require_writable)\r\nTypeError: expected an array ctype, got 'void *'\r\n```\r\n\r\nand I solve it by upgrade cffi from cffi-1.12.2 to cffi-1.14.0.\r\nIn the setup.py of angr, the version of cffi is only required >=1.7.0\r\n\r\n**Environment Information.**\r\n<!--\r\nMany common issues are caused by problems with the local Python environment.\r\nBefore submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) 
are up to date.\r\nPlease include the output of `python -m angr.misc.bug_report` here.\r\n-->\r\n\r\n\r\n**To Reproduce.**\r\n<!--\r\nPlease include *both a script to reproduce the crash, and attach the binary used, if possible*\r\n-->\r\nangr is v8.20.7.6\r\n\r\n\r\n**Additional context.**\r\n<!--\r\nAdd any other context about the problem here.\r\n-->\r\n\n", "before_files": [{"content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\nimport glob\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/appendix/migration\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\nfrom distutils.command.clean import clean as _clean\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\ndef _clean_native():\n oglob = glob.glob('native/*.o')\n oglob += glob.glob('native/*.obj')\n oglob += glob.glob('native/*.so')\n oglob += glob.glob('native/*.dll')\n oglob += glob.glob('native/*.dylib')\n for fname in oglob:\n os.unlink(fname)\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\nclass clean(_clean):\n def run(self, *args):\n 
self.execute(_clean_native, (), msg='Cleaning angr_native')\n _clean.run(self, *args)\n\ncmdclass = {\n 'build': build,\n 'clean': clean,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\n_UNICORN = \"unicorn>=1.0.2rc2\"\n\nsetup(\n name='angr',\n version='8.20.7.6',\n python_requires='>=3.6',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'sortedcontainers',\n 'cachetools',\n 'capstone>=3.0.5rc2',\n 'dpkt',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar2',\n 'rpyc',\n 'cffi>=1.7.0',\n _UNICORN,\n 'archinfo==8.20.7.6',\n 'claripy==8.20.7.6',\n 'cle==8.20.7.6',\n 'pyvex==8.20.7.6',\n 'ailment==8.20.7.6',\n 'GitPython',\n 'psutil',\n 'pycparser>=2.18',\n 'itanium_demangler',\n 'CppHeaderParser',\n 'protobuf',\n ],\n setup_requires=[_UNICORN, 'pyvex'],\n extras_require={\n 'AngrDB': ['sqlalchemy'],\n },\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*', \"py.typed\"]\n }\n)\n", "path": "setup.py"}]}
| 3,255 | 113 |
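An illustrative aside, not taken from the angr sources: a minimal version guard of the kind this report motivates. It assumes only cffi itself and fails fast when the installed release predates the 1.14.0 floor the patch above adds (the reporter saw `ffi.from_buffer(ffi.BVoidP, ...)` fail on cffi 1.12.2 and succeed after upgrading to 1.14.0).

```python
import cffi


def require_cffi(minimum=(1, 14, 0)):
    """Fail fast if the installed cffi is older than `minimum`."""
    # Naive parse that is fine for plain "X.Y.Z" version strings.
    installed = tuple(int(part) for part in cffi.__version__.split(".")[:3])
    if installed < minimum:
        raise RuntimeError(
            "cffi %s is too old for pyvex's ffi.from_buffer(ctype, buffer) call; "
            "install cffi>=%s" % (cffi.__version__, ".".join(map(str, minimum)))
        )


require_cffi()
```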
gh_patches_debug_6038 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-16 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Forbid `handler` as a variable name
We need to add `handler` to our variable blacklist.
</issue>
<code>
[start of wemake_python_styleguide/constants.py]
1 # -*- coding: utf-8 -*-
2
3 BAD_FUNCTIONS = frozenset((
4 # Code generation:
5 'eval',
6 'exec',
7 'compile',
8
9 # Magic:
10 'globals',
11 'locals',
12 'vars',
13 'dir',
14
15 # IO:
16 'input',
17 'help',
18
19 # Attribute access:
20 'hasattr',
21 'delattr',
22 ))
23
24 BAD_IMPORT_FUNCTIONS = frozenset((
25 '__import__',
26 ))
27
28 BAD_MODULE_METADATA_VARIABLES = frozenset((
29 '__author__',
30 ))
31
32 BAD_VARIABLE_NAMES = frozenset((
33 'data',
34 'result',
35 'results',
36 'item',
37 'items',
38 'value',
39 'values',
40 'val',
41 'vals',
42 'var',
43 'vars',
44 'content',
45 'contents',
46 'info',
47 ))
48
49 NESTED_CLASSES_WHITELIST = frozenset((
50 'Meta',
51 ))
52
[end of wemake_python_styleguide/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py
--- a/wemake_python_styleguide/constants.py
+++ b/wemake_python_styleguide/constants.py
@@ -19,6 +19,10 @@
# Attribute access:
'hasattr',
'delattr',
+
+ # Too generic:
+ 'handler',
+ 'handle',
))
BAD_IMPORT_FUNCTIONS = frozenset((
@@ -44,6 +48,7 @@
'content',
'contents',
'info',
+ 'handler',
))
NESTED_CLASSES_WHITELIST = frozenset((
|
{"golden_diff": "diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py\n--- a/wemake_python_styleguide/constants.py\n+++ b/wemake_python_styleguide/constants.py\n@@ -19,6 +19,10 @@\n # Attribute access:\n 'hasattr',\n 'delattr',\n+\n+ # Too generic:\n+ 'handler',\n+ 'handle',\n ))\n \n BAD_IMPORT_FUNCTIONS = frozenset((\n@@ -44,6 +48,7 @@\n 'content',\n 'contents',\n 'info',\n+ 'handler',\n ))\n \n NESTED_CLASSES_WHITELIST = frozenset((\n", "issue": "Forbid `handler` as a variable name\nWe need to add `handler` to our variable blacklist.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nBAD_FUNCTIONS = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'input',\n 'help',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n))\n\nBAD_IMPORT_FUNCTIONS = frozenset((\n '__import__',\n))\n\nBAD_MODULE_METADATA_VARIABLES = frozenset((\n '__author__',\n))\n\nBAD_VARIABLE_NAMES = frozenset((\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'content',\n 'contents',\n 'info',\n))\n\nNESTED_CLASSES_WHITELIST = frozenset((\n 'Meta',\n))\n", "path": "wemake_python_styleguide/constants.py"}]}
| 859 | 143 |
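An illustrative aside rather than the plugin's real checker: a minimal sketch of how a name blacklist like the `BAD_VARIABLE_NAMES` set being extended above can be enforced with the standard `ast` module. The constant is trimmed here; only `handler` and a few neighbours are kept for brevity.

```python
import ast

# Trimmed copy of the blacklist; the real constant lives in wemake_python_styleguide/constants.py.
BAD_VARIABLE_NAMES = frozenset({"data", "result", "value", "handler"})


def bad_name_assignments(source: str):
    """Yield (lineno, name) for every assignment that binds a blacklisted name."""
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            if node.id in BAD_VARIABLE_NAMES:
                yield node.lineno, node.id


print(list(bad_name_assignments("handler = object()\nresult = 1\n")))
# -> [(1, 'handler'), (2, 'result')]
```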
gh_patches_debug_12526 | rasdani/github-patches | git_diff | Netflix__lemur-245 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Internal Server Error hitting auth/login API
Hitting the `/auth/login` API with a GET request returns an HTTP 500 error. The resource needs to be authenticated.
</issue>
<code>
[start of lemur/auth/views.py]
1 """
2 .. module: lemur.auth.views
3 :platform: Unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <[email protected]>
7 """
8 import jwt
9 import base64
10 import requests
11
12 from flask import g, Blueprint, current_app
13
14 from flask.ext.restful import reqparse, Resource, Api
15 from flask.ext.principal import Identity, identity_changed
16
17 from lemur.common.utils import get_psuedo_random_string
18
19 from lemur.users import service as user_service
20 from lemur.roles import service as role_service
21 from lemur.auth.service import create_token, fetch_token_header, get_rsa_public_key
22
23
24 mod = Blueprint('auth', __name__)
25 api = Api(mod)
26
27
28 class Login(Resource):
29 """
30 Provides an endpoint for Lemur's basic authentication. It takes a username and password
31 combination and returns a JWT token.
32
33 This token token is required for each API request and must be provided in the Authorization Header for the request.
34 ::
35
36 Authorization:Bearer <token>
37
38 Tokens have a set expiration date. You can inspect the token expiration by base64 decoding the token and inspecting
39 it's contents.
40
41 .. note:: It is recommended that the token expiration is fairly short lived (hours not days). This will largely depend \
42 on your uses cases but. It is important to not that there is currently no build in method to revoke a users token \
43 and force re-authentication.
44 """
45 def __init__(self):
46 self.reqparse = reqparse.RequestParser()
47 super(Login, self).__init__()
48
49 def post(self):
50 """
51 .. http:post:: /auth/login
52
53 Login with username:password
54
55 **Example request**:
56
57 .. sourcecode:: http
58
59 POST /auth/login HTTP/1.1
60 Host: example.com
61 Accept: application/json, text/javascript
62
63 {
64 "username": "test",
65 "password": "test"
66 }
67
68 **Example response**:
69
70 .. sourcecode:: http
71
72 HTTP/1.1 200 OK
73 Vary: Accept
74 Content-Type: text/javascript
75
76 {
77 "token": "12343243243"
78 }
79
80 :arg username: username
81 :arg password: password
82 :statuscode 401: invalid credentials
83 :statuscode 200: no error
84 """
85 self.reqparse.add_argument('username', type=str, required=True, location='json')
86 self.reqparse.add_argument('password', type=str, required=True, location='json')
87
88 args = self.reqparse.parse_args()
89
90 if '@' in args['username']:
91 user = user_service.get_by_email(args['username'])
92 else:
93 user = user_service.get_by_username(args['username'])
94
95 if user and user.check_password(args['password']):
96 # Tell Flask-Principal the identity changed
97 identity_changed.send(current_app._get_current_object(),
98 identity=Identity(user.id))
99 return dict(token=create_token(user))
100
101 return dict(message='The supplied credentials are invalid'), 401
102
103 def get(self):
104 return {'username': g.current_user.username, 'roles': [r.name for r in g.current_user.roles]}
105
106
107 class Ping(Resource):
108 """
109 This class serves as an example of how one might implement an SSO provider for use with Lemur. In
110 this example we use a OpenIDConnect authentication flow, that is essentially OAuth2 underneath. If you have an
111 OAuth2 provider you want to use Lemur there would be two steps:
112
113 1. Define your own class that inherits from :class:`flask.ext.restful.Resource` and create the HTTP methods the \
114 provider uses for it's callbacks.
115 2. Add or change the Lemur AngularJS Configuration to point to your new provider
116 """
117 def __init__(self):
118 self.reqparse = reqparse.RequestParser()
119 super(Ping, self).__init__()
120
121 def post(self):
122 self.reqparse.add_argument('clientId', type=str, required=True, location='json')
123 self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
124 self.reqparse.add_argument('code', type=str, required=True, location='json')
125
126 args = self.reqparse.parse_args()
127
128 # take the information we have received from the provider to create a new request
129 params = {
130 'client_id': args['clientId'],
131 'grant_type': 'authorization_code',
132 'scope': 'openid email profile address',
133 'redirect_uri': args['redirectUri'],
134 'code': args['code']
135 }
136
137 # you can either discover these dynamically or simply configure them
138 access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL')
139 user_api_url = current_app.config.get('PING_USER_API_URL')
140
141 # the secret and cliendId will be given to you when you signup for the provider
142 basic = base64.b64encode('{0}:{1}'.format(args['clientId'], current_app.config.get("PING_SECRET")))
143 headers = {'Authorization': 'Basic {0}'.format(basic)}
144
145 # exchange authorization code for access token.
146
147 r = requests.post(access_token_url, headers=headers, params=params)
148 id_token = r.json()['id_token']
149 access_token = r.json()['access_token']
150
151 # fetch token public key
152 header_data = fetch_token_header(id_token)
153 jwks_url = current_app.config.get('PING_JWKS_URL')
154
155 # retrieve the key material as specified by the token header
156 r = requests.get(jwks_url)
157 for key in r.json()['keys']:
158 if key['kid'] == header_data['kid']:
159 secret = get_rsa_public_key(key['n'], key['e'])
160 algo = header_data['alg']
161 break
162 else:
163 return dict(message='Key not found'), 403
164
165 # validate your token based on the key it was signed with
166 try:
167 jwt.decode(id_token, secret, algorithms=[algo], audience=args['clientId'])
168 except jwt.DecodeError:
169 return dict(message='Token is invalid'), 403
170 except jwt.ExpiredSignatureError:
171 return dict(message='Token has expired'), 403
172 except jwt.InvalidTokenError:
173 return dict(message='Token is invalid'), 403
174
175 user_params = dict(access_token=access_token, schema='profile')
176
177 # retrieve information about the current user.
178 r = requests.get(user_api_url, params=user_params)
179 profile = r.json()
180
181 user = user_service.get_by_email(profile['email'])
182
183 # update their google 'roles'
184 roles = []
185
186 for group in profile['googleGroups']:
187 role = role_service.get_by_name(group)
188 if not role:
189 role = role_service.create(group, description='This is a google group based role created by Lemur')
190 roles.append(role)
191
192 # if we get an sso user create them an account
193 # we still pick a random password in case sso is down
194 if not user:
195
196 # every user is an operator (tied to a default role)
197 if current_app.config.get('LEMUR_DEFAULT_ROLE'):
198 v = role_service.get_by_name(current_app.config.get('LEMUR_DEFAULT_ROLE'))
199 if v:
200 roles.append(v)
201
202 user = user_service.create(
203 profile['email'],
204 get_psuedo_random_string(),
205 profile['email'],
206 True,
207 profile.get('thumbnailPhotoUrl'),
208 roles
209 )
210
211 else:
212 # we add 'lemur' specific roles, so they do not get marked as removed
213 for ur in user.roles:
214 if ur.authority_id:
215 roles.append(ur)
216
217 # update any changes to the user
218 user_service.update(
219 user.id,
220 profile['email'],
221 profile['email'],
222 True,
223 profile.get('thumbnailPhotoUrl'), # incase profile isn't google+ enabled
224 roles
225 )
226
227 # Tell Flask-Principal the identity changed
228 identity_changed.send(current_app._get_current_object(), identity=Identity(user.id))
229
230 return dict(token=create_token(user))
231
232
233 class Google(Resource):
234 def __init__(self):
235 self.reqparse = reqparse.RequestParser()
236 super(Google, self).__init__()
237
238 def post(self):
239 access_token_url = 'https://accounts.google.com/o/oauth2/token'
240 people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
241
242 self.reqparse.add_argument('clientId', type=str, required=True, location='json')
243 self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
244 self.reqparse.add_argument('code', type=str, required=True, location='json')
245
246 args = self.reqparse.parse_args()
247
248 # Step 1. Exchange authorization code for access token
249 payload = {
250 'client_id': args['clientId'],
251 'grant_type': 'authorization_code',
252 'redirect_uri': args['redirectUri'],
253 'code': args['code'],
254 'client_secret': current_app.config.get('GOOGLE_SECRET')
255 }
256
257 r = requests.post(access_token_url, data=payload)
258 token = r.json()
259
260 # Step 2. Retrieve information about the current user
261 headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
262
263 r = requests.get(people_api_url, headers=headers)
264 profile = r.json()
265
266 user = user_service.get_by_email(profile['email'])
267
268 if user:
269 return dict(token=create_token(user))
270
271
272 class Providers(Resource):
273 def get(self):
274 active_providers = []
275
276 for provider in current_app.config.get("ACTIVE_PROVIDERS"):
277 provider = provider.lower()
278
279 if provider == "google":
280 active_providers.append({
281 'name': 'google',
282 'clientId': current_app.config.get("GOOGLE_CLIENT_ID"),
283 'url': api.url_for(Google)
284 })
285
286 elif provider == "ping":
287 active_providers.append({
288 'name': current_app.config.get("PING_NAME"),
289 'url': current_app.config.get('PING_REDIRECT_URI'),
290 'redirectUri': current_app.config.get("PING_REDIRECT_URI"),
291 'clientId': current_app.config.get("PING_CLIENT_ID"),
292 'responseType': 'code',
293 'scope': ['openid', 'email', 'profile', 'address'],
294 'scopeDelimiter': ' ',
295 'authorizationEndpoint': current_app.config.get("PING_AUTH_ENDPOINT"),
296 'requiredUrlParams': ['scope'],
297 'type': '2.0'
298 })
299
300 return active_providers
301
302
303 api.add_resource(Login, '/auth/login', endpoint='login')
304 api.add_resource(Ping, '/auth/ping', endpoint='ping')
305 api.add_resource(Google, '/auth/google', endpoint='google')
306 api.add_resource(Providers, '/auth/providers', endpoint='providers')
307
[end of lemur/auth/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -9,7 +9,7 @@
import base64
import requests
-from flask import g, Blueprint, current_app
+from flask import Blueprint, current_app
from flask.ext.restful import reqparse, Resource, Api
from flask.ext.principal import Identity, identity_changed
@@ -100,9 +100,6 @@
return dict(message='The supplied credentials are invalid'), 401
- def get(self):
- return {'username': g.current_user.username, 'roles': [r.name for r in g.current_user.roles]}
-
class Ping(Resource):
"""
|
{"golden_diff": "diff --git a/lemur/auth/views.py b/lemur/auth/views.py\n--- a/lemur/auth/views.py\n+++ b/lemur/auth/views.py\n@@ -9,7 +9,7 @@\n import base64\n import requests\n \n-from flask import g, Blueprint, current_app\n+from flask import Blueprint, current_app\n \n from flask.ext.restful import reqparse, Resource, Api\n from flask.ext.principal import Identity, identity_changed\n@@ -100,9 +100,6 @@\n \n return dict(message='The supplied credentials are invalid'), 401\n \n- def get(self):\n- return {'username': g.current_user.username, 'roles': [r.name for r in g.current_user.roles]}\n-\n \n class Ping(Resource):\n \"\"\"\n", "issue": "Internal Server Error hitting auth/login API\nHitting the `/auth/login` API with a GET request returns an HTTP 500 error. The resource needs to be authenticated.\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.auth.views\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport jwt\nimport base64\nimport requests\n\nfrom flask import g, Blueprint, current_app\n\nfrom flask.ext.restful import reqparse, Resource, Api\nfrom flask.ext.principal import Identity, identity_changed\n\nfrom lemur.common.utils import get_psuedo_random_string\n\nfrom lemur.users import service as user_service\nfrom lemur.roles import service as role_service\nfrom lemur.auth.service import create_token, fetch_token_header, get_rsa_public_key\n\n\nmod = Blueprint('auth', __name__)\napi = Api(mod)\n\n\nclass Login(Resource):\n \"\"\"\n Provides an endpoint for Lemur's basic authentication. It takes a username and password\n combination and returns a JWT token.\n\n This token token is required for each API request and must be provided in the Authorization Header for the request.\n ::\n\n Authorization:Bearer <token>\n\n Tokens have a set expiration date. You can inspect the token expiration by base64 decoding the token and inspecting\n it's contents.\n\n .. note:: It is recommended that the token expiration is fairly short lived (hours not days). This will largely depend \\\n on your uses cases but. It is important to not that there is currently no build in method to revoke a users token \\\n and force re-authentication.\n \"\"\"\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super(Login, self).__init__()\n\n def post(self):\n \"\"\"\n .. http:post:: /auth/login\n\n Login with username:password\n\n **Example request**:\n\n .. sourcecode:: http\n\n POST /auth/login HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n {\n \"username\": \"test\",\n \"password\": \"test\"\n }\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n \"token\": \"12343243243\"\n }\n\n :arg username: username\n :arg password: password\n :statuscode 401: invalid credentials\n :statuscode 200: no error\n \"\"\"\n self.reqparse.add_argument('username', type=str, required=True, location='json')\n self.reqparse.add_argument('password', type=str, required=True, location='json')\n\n args = self.reqparse.parse_args()\n\n if '@' in args['username']:\n user = user_service.get_by_email(args['username'])\n else:\n user = user_service.get_by_username(args['username'])\n\n if user and user.check_password(args['password']):\n # Tell Flask-Principal the identity changed\n identity_changed.send(current_app._get_current_object(),\n identity=Identity(user.id))\n return dict(token=create_token(user))\n\n return dict(message='The supplied credentials are invalid'), 401\n\n def get(self):\n return {'username': g.current_user.username, 'roles': [r.name for r in g.current_user.roles]}\n\n\nclass Ping(Resource):\n \"\"\"\n This class serves as an example of how one might implement an SSO provider for use with Lemur. In\n this example we use a OpenIDConnect authentication flow, that is essentially OAuth2 underneath. If you have an\n OAuth2 provider you want to use Lemur there would be two steps:\n\n 1. Define your own class that inherits from :class:`flask.ext.restful.Resource` and create the HTTP methods the \\\n provider uses for it's callbacks.\n 2. Add or change the Lemur AngularJS Configuration to point to your new provider\n \"\"\"\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super(Ping, self).__init__()\n\n def post(self):\n self.reqparse.add_argument('clientId', type=str, required=True, location='json')\n self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')\n self.reqparse.add_argument('code', type=str, required=True, location='json')\n\n args = self.reqparse.parse_args()\n\n # take the information we have received from the provider to create a new request\n params = {\n 'client_id': args['clientId'],\n 'grant_type': 'authorization_code',\n 'scope': 'openid email profile address',\n 'redirect_uri': args['redirectUri'],\n 'code': args['code']\n }\n\n # you can either discover these dynamically or simply configure them\n access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL')\n user_api_url = current_app.config.get('PING_USER_API_URL')\n\n # the secret and cliendId will be given to you when you signup for the provider\n basic = base64.b64encode('{0}:{1}'.format(args['clientId'], current_app.config.get(\"PING_SECRET\")))\n headers = {'Authorization': 'Basic {0}'.format(basic)}\n\n # exchange authorization code for access token.\n\n r = requests.post(access_token_url, headers=headers, params=params)\n id_token = r.json()['id_token']\n access_token = r.json()['access_token']\n\n # fetch token public key\n header_data = fetch_token_header(id_token)\n jwks_url = current_app.config.get('PING_JWKS_URL')\n\n # retrieve the key material as specified by the token header\n r = requests.get(jwks_url)\n for key in r.json()['keys']:\n if key['kid'] == header_data['kid']:\n secret = get_rsa_public_key(key['n'], key['e'])\n algo = header_data['alg']\n break\n else:\n return dict(message='Key not found'), 403\n\n # validate your token based on the key it was signed with\n try:\n jwt.decode(id_token, secret, algorithms=[algo], audience=args['clientId'])\n except jwt.DecodeError:\n return dict(message='Token is 
invalid'), 403\n except jwt.ExpiredSignatureError:\n return dict(message='Token has expired'), 403\n except jwt.InvalidTokenError:\n return dict(message='Token is invalid'), 403\n\n user_params = dict(access_token=access_token, schema='profile')\n\n # retrieve information about the current user.\n r = requests.get(user_api_url, params=user_params)\n profile = r.json()\n\n user = user_service.get_by_email(profile['email'])\n\n # update their google 'roles'\n roles = []\n\n for group in profile['googleGroups']:\n role = role_service.get_by_name(group)\n if not role:\n role = role_service.create(group, description='This is a google group based role created by Lemur')\n roles.append(role)\n\n # if we get an sso user create them an account\n # we still pick a random password in case sso is down\n if not user:\n\n # every user is an operator (tied to a default role)\n if current_app.config.get('LEMUR_DEFAULT_ROLE'):\n v = role_service.get_by_name(current_app.config.get('LEMUR_DEFAULT_ROLE'))\n if v:\n roles.append(v)\n\n user = user_service.create(\n profile['email'],\n get_psuedo_random_string(),\n profile['email'],\n True,\n profile.get('thumbnailPhotoUrl'),\n roles\n )\n\n else:\n # we add 'lemur' specific roles, so they do not get marked as removed\n for ur in user.roles:\n if ur.authority_id:\n roles.append(ur)\n\n # update any changes to the user\n user_service.update(\n user.id,\n profile['email'],\n profile['email'],\n True,\n profile.get('thumbnailPhotoUrl'), # incase profile isn't google+ enabled\n roles\n )\n\n # Tell Flask-Principal the identity changed\n identity_changed.send(current_app._get_current_object(), identity=Identity(user.id))\n\n return dict(token=create_token(user))\n\n\nclass Google(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super(Google, self).__init__()\n\n def post(self):\n access_token_url = 'https://accounts.google.com/o/oauth2/token'\n people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'\n\n self.reqparse.add_argument('clientId', type=str, required=True, location='json')\n self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')\n self.reqparse.add_argument('code', type=str, required=True, location='json')\n\n args = self.reqparse.parse_args()\n\n # Step 1. Exchange authorization code for access token\n payload = {\n 'client_id': args['clientId'],\n 'grant_type': 'authorization_code',\n 'redirect_uri': args['redirectUri'],\n 'code': args['code'],\n 'client_secret': current_app.config.get('GOOGLE_SECRET')\n }\n\n r = requests.post(access_token_url, data=payload)\n token = r.json()\n\n # Step 2. 
Retrieve information about the current user\n headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}\n\n r = requests.get(people_api_url, headers=headers)\n profile = r.json()\n\n user = user_service.get_by_email(profile['email'])\n\n if user:\n return dict(token=create_token(user))\n\n\nclass Providers(Resource):\n def get(self):\n active_providers = []\n\n for provider in current_app.config.get(\"ACTIVE_PROVIDERS\"):\n provider = provider.lower()\n\n if provider == \"google\":\n active_providers.append({\n 'name': 'google',\n 'clientId': current_app.config.get(\"GOOGLE_CLIENT_ID\"),\n 'url': api.url_for(Google)\n })\n\n elif provider == \"ping\":\n active_providers.append({\n 'name': current_app.config.get(\"PING_NAME\"),\n 'url': current_app.config.get('PING_REDIRECT_URI'),\n 'redirectUri': current_app.config.get(\"PING_REDIRECT_URI\"),\n 'clientId': current_app.config.get(\"PING_CLIENT_ID\"),\n 'responseType': 'code',\n 'scope': ['openid', 'email', 'profile', 'address'],\n 'scopeDelimiter': ' ',\n 'authorizationEndpoint': current_app.config.get(\"PING_AUTH_ENDPOINT\"),\n 'requiredUrlParams': ['scope'],\n 'type': '2.0'\n })\n\n return active_providers\n\n\napi.add_resource(Login, '/auth/login', endpoint='login')\napi.add_resource(Ping, '/auth/ping', endpoint='ping')\napi.add_resource(Google, '/auth/google', endpoint='google')\napi.add_resource(Providers, '/auth/providers', endpoint='providers')\n", "path": "lemur/auth/views.py"}]}
| 3,779 | 167 |
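An illustrative reconstruction of the failure mode, not Lemur's actual code (the accepted patch above simply deletes the GET handler): `flask.g` only gains a `current_user` attribute once authentication has run, so an anonymous GET hits an `AttributeError` that Flask reports as the HTTP 500 in the issue. The sketch below, assuming a current `flask_restful` install, shows the defensive alternative of returning 401 instead.

```python
from flask import Flask, g
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)


class Login(Resource):
    def get(self):
        # g.current_user is only set by the auth machinery; for an anonymous
        # request, getattr() avoids the AttributeError that produced the 500.
        user = getattr(g, "current_user", None)
        if user is None:
            return {"message": "authentication required"}, 401
        return {"username": user.username, "roles": [r.name for r in user.roles]}


api.add_resource(Login, "/auth/login")
```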
gh_patches_debug_49488 | rasdani/github-patches | git_diff | pex-tool__pex-104 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'pex <dir>' doesn't actually work unless an .egg-info is already there
It turns out that pex for some reason generates an .egg-info missing setup.py, which causes 'pex .' to fail in a fresh clone.
``` bash
mba=flask=; ~/clients/pex/dist/pex -v -v -v -v -v .
**** Failed to install Flask-0.11.dev0. stdout:
**** Failed to install Flask-0.11.dev0. stderr:
Traceback (most recent call last):
File "<stdin>", line 6, in <module>
IOError: [Errno 2] No such file or directory: 'setup.py'
```
</issue>
<code>
[start of pex/installer.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import, print_function
5
6 import os
7 import subprocess
8 import sys
9 import tempfile
10
11 from pkg_resources import Distribution, PathMetadata
12
13 from .common import safe_mkdtemp, safe_rmtree
14 from .interpreter import PythonInterpreter
15 from .tracer import TRACER
16 from .version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT
17
18 __all__ = (
19 'Installer',
20 'Packager'
21 )
22
23
24 def after_installation(function):
25 def function_wrapper(self, *args, **kw):
26 self._installed = self.run()
27 if not self._installed:
28 raise Installer.InstallFailure('Failed to install %s' % self._source_dir)
29 return function(self, *args, **kw)
30 return function_wrapper
31
32
33 class InstallerBase(object):
34 SETUP_BOOTSTRAP_HEADER = "import sys"
35 SETUP_BOOTSTRAP_MODULE = "sys.path.insert(0, %(path)r); import %(module)s"
36 SETUP_BOOTSTRAP_FOOTER = """
37 __file__ = 'setup.py'
38 exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
39 """
40
41 class Error(Exception): pass
42 class InstallFailure(Error): pass
43 class IncapableInterpreter(Error): pass
44
45 def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None):
46 """
47 Create an installer from an unpacked source distribution in source_dir.
48
49 If strict=True, fail if any installation dependencies (e.g. distribute)
50 are missing.
51 """
52 self._source_dir = source_dir
53 self._install_tmp = install_dir or safe_mkdtemp()
54 self._installed = None
55 self._strict = strict
56 self._interpreter = interpreter or PythonInterpreter.get()
57 if not self._interpreter.satisfies(self.capability) and strict:
58 raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (
59 self._interpreter.binary, self.__class__.__name__))
60
61 def mixins(self):
62 """Return a map from import name to requirement to load into setup script prior to invocation.
63
64 May be subclassed.
65 """
66 return {}
67
68 @property
69 def install_tmp(self):
70 return self._install_tmp
71
72 def _setup_command(self):
73 """the setup command-line to run, to be implemented by subclasses."""
74 raise NotImplementedError
75
76 def _postprocess(self):
77 """a post-processing function to run following setup.py invocation."""
78
79 @property
80 def capability(self):
81 """returns the list of requirements for the interpreter to run this installer."""
82 return list(self.mixins().values())
83
84 @property
85 def bootstrap_script(self):
86 bootstrap_modules = []
87 for module, requirement in self.mixins().items():
88 path = self._interpreter.get_location(requirement)
89 if not path:
90 assert not self._strict # This should be caught by validation
91 continue
92 bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module})
93 return '\n'.join(
94 [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER])
95
96 def run(self):
97 if self._installed is not None:
98 return self._installed
99
100 with TRACER.timed('Installing %s' % self._install_tmp, V=2):
101 command = [self._interpreter.binary, '-']
102 command.extend(self._setup_command())
103 po = subprocess.Popen(command,
104 stdin=subprocess.PIPE,
105 stdout=subprocess.PIPE,
106 stderr=subprocess.PIPE,
107 env=self._interpreter.sanitized_environment(),
108 cwd=self._source_dir)
109 so, se = po.communicate(self.bootstrap_script.encode('ascii'))
110 self._installed = po.returncode == 0
111
112 if not self._installed:
113 name = os.path.basename(self._source_dir)
114 print('**** Failed to install %s. stdout:\n%s' % (name, so.decode('utf-8')), file=sys.stderr)
115 print('**** Failed to install %s. stderr:\n%s' % (name, se.decode('utf-8')), file=sys.stderr)
116 return self._installed
117
118 self._postprocess()
119 return self._installed
120
121 def cleanup(self):
122 safe_rmtree(self._install_tmp)
123
124
125 class Installer(InstallerBase):
126 """Install an unpacked distribution with a setup.py."""
127
128 def __init__(self, source_dir, strict=True, interpreter=None):
129 """
130 Create an installer from an unpacked source distribution in source_dir.
131
132 If strict=True, fail if any installation dependencies (e.g. setuptools)
133 are missing.
134 """
135 super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter)
136 self._egg_info = None
137 fd, self._install_record = tempfile.mkstemp()
138 os.close(fd)
139
140 def _setup_command(self):
141 return ['install',
142 '--root=%s' % self._install_tmp,
143 '--prefix=',
144 '--single-version-externally-managed',
145 '--record', self._install_record]
146
147 def _postprocess(self):
148 installed_files = []
149 egg_info = None
150 with open(self._install_record) as fp:
151 installed_files = fp.read().splitlines()
152 for line in installed_files:
153 if line.endswith('.egg-info'):
154 assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!'
155 egg_info = line
156 break
157
158 if not egg_info:
159 self._installed = False
160 return self._installed
161
162 installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info]
163
164 self._egg_info = os.path.join(self._install_tmp, egg_info[1:])
165 with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp:
166 fp.write('\n'.join(installed_files))
167 fp.write('\n')
168
169 return self._installed
170
171 @after_installation
172 def egg_info(self):
173 return self._egg_info
174
175 @after_installation
176 def root(self):
177 egg_info = self.egg_info()
178 assert egg_info
179 return os.path.realpath(os.path.dirname(egg_info))
180
181 @after_installation
182 def distribution(self):
183 base_dir = self.root()
184 egg_info = self.egg_info()
185 metadata = PathMetadata(base_dir, egg_info)
186 return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata)
187
188
189 class DistributionPackager(InstallerBase):
190 def mixins(self):
191 mixins = super(DistributionPackager, self).mixins().copy()
192 mixins.update(setuptools='setuptools>=1')
193 return mixins
194
195 def find_distribution(self):
196 dists = os.listdir(self.install_tmp)
197 if len(dists) == 0:
198 raise self.InstallFailure('No distributions were produced!')
199 elif len(dists) > 1:
200 raise self.InstallFailure('Ambiguous source distributions found: %s' % (' '.join(dists)))
201 else:
202 return os.path.join(self.install_tmp, dists[0])
203
204
205 class Packager(DistributionPackager):
206 """
207 Create a source distribution from an unpacked setup.py-based project.
208 """
209
210 def _setup_command(self):
211 return ['sdist', '--formats=gztar', '--dist-dir=%s' % self._install_tmp]
212
213 @after_installation
214 def sdist(self):
215 return self.find_distribution()
216
217
218 class EggInstaller(DistributionPackager):
219 """
220 Create a source distribution from an unpacked setup.py-based project.
221 """
222
223 def _setup_command(self):
224 return ['bdist_egg', '--dist-dir=%s' % self._install_tmp]
225
226 @after_installation
227 def bdist(self):
228 return self.find_distribution()
229
230
231 class WheelInstaller(DistributionPackager):
232 """
233 Create a source distribution from an unpacked setup.py-based project.
234 """
235 MIXINS = {
236 'setuptools': SETUPTOOLS_REQUIREMENT,
237 'wheel': WHEEL_REQUIREMENT,
238 }
239
240 def mixins(self):
241 mixins = super(WheelInstaller, self).mixins().copy()
242 mixins.update(self.MIXINS)
243 return mixins
244
245 def _setup_command(self):
246 return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp]
247
248 @after_installation
249 def bdist(self):
250 return self.find_distribution()
251
[end of pex/installer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/installer.py b/pex/installer.py
--- a/pex/installer.py
+++ b/pex/installer.py
@@ -35,6 +35,7 @@
SETUP_BOOTSTRAP_MODULE = "sys.path.insert(0, %(path)r); import %(module)s"
SETUP_BOOTSTRAP_FOOTER = """
__file__ = 'setup.py'
+sys.argv[0] = 'setup.py'
exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
|
{"golden_diff": "diff --git a/pex/installer.py b/pex/installer.py\n--- a/pex/installer.py\n+++ b/pex/installer.py\n@@ -35,6 +35,7 @@\n SETUP_BOOTSTRAP_MODULE = \"sys.path.insert(0, %(path)r); import %(module)s\"\n SETUP_BOOTSTRAP_FOOTER = \"\"\"\n __file__ = 'setup.py'\n+sys.argv[0] = 'setup.py'\n exec(compile(open(__file__).read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\n \"\"\"\n", "issue": "'pex <dir>' doesn't actually work unless an .egg-info is already there\nIt turns out that pex for some reason generates an .egg-info missing setup.py, which causes 'pex .' to fail in a fresh clone.\n\n``` bash\nmba=flask=; ~/clients/pex/dist/pex -v -v -v -v -v .\n**** Failed to install Flask-0.11.dev0. stdout:\n\n**** Failed to install Flask-0.11.dev0. stderr:\nTraceback (most recent call last):\n File \"<stdin>\", line 6, in <module>\nIOError: [Errno 2] No such file or directory: 'setup.py'\n```\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom pkg_resources import Distribution, PathMetadata\n\nfrom .common import safe_mkdtemp, safe_rmtree\nfrom .interpreter import PythonInterpreter\nfrom .tracer import TRACER\nfrom .version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT\n\n__all__ = (\n 'Installer',\n 'Packager'\n)\n\n\ndef after_installation(function):\n def function_wrapper(self, *args, **kw):\n self._installed = self.run()\n if not self._installed:\n raise Installer.InstallFailure('Failed to install %s' % self._source_dir)\n return function(self, *args, **kw)\n return function_wrapper\n\n\nclass InstallerBase(object):\n SETUP_BOOTSTRAP_HEADER = \"import sys\"\n SETUP_BOOTSTRAP_MODULE = \"sys.path.insert(0, %(path)r); import %(module)s\"\n SETUP_BOOTSTRAP_FOOTER = \"\"\"\n__file__ = 'setup.py'\nexec(compile(open(__file__).read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\n\"\"\"\n\n class Error(Exception): pass\n class InstallFailure(Error): pass\n class IncapableInterpreter(Error): pass\n\n def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. 
distribute)\n are missing.\n \"\"\"\n self._source_dir = source_dir\n self._install_tmp = install_dir or safe_mkdtemp()\n self._installed = None\n self._strict = strict\n self._interpreter = interpreter or PythonInterpreter.get()\n if not self._interpreter.satisfies(self.capability) and strict:\n raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (\n self._interpreter.binary, self.__class__.__name__))\n\n def mixins(self):\n \"\"\"Return a map from import name to requirement to load into setup script prior to invocation.\n\n May be subclassed.\n \"\"\"\n return {}\n\n @property\n def install_tmp(self):\n return self._install_tmp\n\n def _setup_command(self):\n \"\"\"the setup command-line to run, to be implemented by subclasses.\"\"\"\n raise NotImplementedError\n\n def _postprocess(self):\n \"\"\"a post-processing function to run following setup.py invocation.\"\"\"\n\n @property\n def capability(self):\n \"\"\"returns the list of requirements for the interpreter to run this installer.\"\"\"\n return list(self.mixins().values())\n\n @property\n def bootstrap_script(self):\n bootstrap_modules = []\n for module, requirement in self.mixins().items():\n path = self._interpreter.get_location(requirement)\n if not path:\n assert not self._strict # This should be caught by validation\n continue\n bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module})\n return '\\n'.join(\n [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER])\n\n def run(self):\n if self._installed is not None:\n return self._installed\n\n with TRACER.timed('Installing %s' % self._install_tmp, V=2):\n command = [self._interpreter.binary, '-']\n command.extend(self._setup_command())\n po = subprocess.Popen(command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self._interpreter.sanitized_environment(),\n cwd=self._source_dir)\n so, se = po.communicate(self.bootstrap_script.encode('ascii'))\n self._installed = po.returncode == 0\n\n if not self._installed:\n name = os.path.basename(self._source_dir)\n print('**** Failed to install %s. stdout:\\n%s' % (name, so.decode('utf-8')), file=sys.stderr)\n print('**** Failed to install %s. stderr:\\n%s' % (name, se.decode('utf-8')), file=sys.stderr)\n return self._installed\n\n self._postprocess()\n return self._installed\n\n def cleanup(self):\n safe_rmtree(self._install_tmp)\n\n\nclass Installer(InstallerBase):\n \"\"\"Install an unpacked distribution with a setup.py.\"\"\"\n\n def __init__(self, source_dir, strict=True, interpreter=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. 
setuptools)\n are missing.\n \"\"\"\n super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter)\n self._egg_info = None\n fd, self._install_record = tempfile.mkstemp()\n os.close(fd)\n\n def _setup_command(self):\n return ['install',\n '--root=%s' % self._install_tmp,\n '--prefix=',\n '--single-version-externally-managed',\n '--record', self._install_record]\n\n def _postprocess(self):\n installed_files = []\n egg_info = None\n with open(self._install_record) as fp:\n installed_files = fp.read().splitlines()\n for line in installed_files:\n if line.endswith('.egg-info'):\n assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!'\n egg_info = line\n break\n\n if not egg_info:\n self._installed = False\n return self._installed\n\n installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info]\n\n self._egg_info = os.path.join(self._install_tmp, egg_info[1:])\n with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp:\n fp.write('\\n'.join(installed_files))\n fp.write('\\n')\n\n return self._installed\n\n @after_installation\n def egg_info(self):\n return self._egg_info\n\n @after_installation\n def root(self):\n egg_info = self.egg_info()\n assert egg_info\n return os.path.realpath(os.path.dirname(egg_info))\n\n @after_installation\n def distribution(self):\n base_dir = self.root()\n egg_info = self.egg_info()\n metadata = PathMetadata(base_dir, egg_info)\n return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata)\n\n\nclass DistributionPackager(InstallerBase):\n def mixins(self):\n mixins = super(DistributionPackager, self).mixins().copy()\n mixins.update(setuptools='setuptools>=1')\n return mixins\n\n def find_distribution(self):\n dists = os.listdir(self.install_tmp)\n if len(dists) == 0:\n raise self.InstallFailure('No distributions were produced!')\n elif len(dists) > 1:\n raise self.InstallFailure('Ambiguous source distributions found: %s' % (' '.join(dists)))\n else:\n return os.path.join(self.install_tmp, dists[0])\n\n\nclass Packager(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n return ['sdist', '--formats=gztar', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def sdist(self):\n return self.find_distribution()\n\n\nclass EggInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n return ['bdist_egg', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n\n\nclass WheelInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n MIXINS = {\n 'setuptools': SETUPTOOLS_REQUIREMENT,\n 'wheel': WHEEL_REQUIREMENT,\n }\n\n def mixins(self):\n mixins = super(WheelInstaller, self).mixins().copy()\n mixins.update(self.MIXINS)\n return mixins\n\n def _setup_command(self):\n return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n", "path": "pex/installer.py"}]}
| 3,194 | 121 |
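A small, standalone check of the interpreter behaviour that the added `sys.argv[0] = 'setup.py'` line works around: the installer pipes its bootstrap to `python -`, and in that mode `sys.argv[0]` is the literal string `'-'` rather than a script path, so anything that derives file names from it (as the sdist step apparently did here) goes looking for the wrong file.

```python
import subprocess
import sys

# Pipe a tiny program to `python -`, the same invocation style the installer uses.
proc = subprocess.run(
    [sys.executable, "-"],
    input=b"import sys; print(repr(sys.argv[0]))",
    capture_output=True,
    check=True,
)
print(proc.stdout.decode().strip())  # '-'
```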
gh_patches_debug_13847 | rasdani/github-patches | git_diff | ivy-llc__ivy-18915 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
triangular
</issue>
<code>
[start of ivy/functional/frontends/numpy/random/functions.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.numpy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 from_zero_dim_arrays_to_scalar,
6 )
7
8
9 @to_ivy_arrays_and_back
10 @from_zero_dim_arrays_to_scalar
11 def random_sample(size=None):
12 return ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
13
14
15 @to_ivy_arrays_and_back
16 @from_zero_dim_arrays_to_scalar
17 def dirichlet(alpha, size=None):
18 return ivy.dirichlet(alpha, size=size)
19
20
21 @to_ivy_arrays_and_back
22 @from_zero_dim_arrays_to_scalar
23 def uniform(low=0.0, high=1.0, size=None):
24 return ivy.random_uniform(low=low, high=high, shape=size, dtype="float64")
25
26
27 @to_ivy_arrays_and_back
28 @from_zero_dim_arrays_to_scalar
29 def geometric(p, size=None):
30 if p < 0 or p > 1:
31 raise ValueError("p must be in the interval [0, 1]")
32 oneMinusP = ivy.subtract(1, p)
33 sizeMinusOne = ivy.subtract(size, 1)
34
35 return ivy.multiply(ivy.pow(oneMinusP, sizeMinusOne), p)
36
37
38 @to_ivy_arrays_and_back
39 @from_zero_dim_arrays_to_scalar
40 def normal(loc=0.0, scale=1.0, size=None):
41 return ivy.random_normal(mean=loc, std=scale, shape=size, dtype="float64")
42
43
44 @to_ivy_arrays_and_back
45 @from_zero_dim_arrays_to_scalar
46 def poisson(lam=1.0, size=None):
47 return ivy.poisson(lam=lam, shape=size)
48
49
50 @to_ivy_arrays_and_back
51 @from_zero_dim_arrays_to_scalar
52 def multinomial(n, pvals, size=None):
53 assert not ivy.exists(size) or (len(size) > 0 and len(size) < 3)
54 batch_size = 1
55 if ivy.exists(size):
56 if len(size) == 2:
57 batch_size = size[0]
58 num_samples = size[1]
59 else:
60 num_samples = size[0]
61 else:
62 num_samples = len(pvals)
63 return ivy.multinomial(n, num_samples, batch_size=batch_size, probs=pvals)
64
65
66 @to_ivy_arrays_and_back
67 @from_zero_dim_arrays_to_scalar
68 def permutation(x, /):
69 if isinstance(x, int):
70 x = ivy.arange(x)
71 return ivy.shuffle(x)
72
73
74 @to_ivy_arrays_and_back
75 @from_zero_dim_arrays_to_scalar
76 def beta(a, b, size=None):
77 return ivy.beta(a, b, shape=size)
78
79
80 @to_ivy_arrays_and_back
81 @from_zero_dim_arrays_to_scalar
82 def shuffle(x, axis=0, /):
83 if isinstance(x, int):
84 x = ivy.arange(x)
85 return ivy.shuffle(x, axis)
86
87
88 @to_ivy_arrays_and_back
89 @from_zero_dim_arrays_to_scalar
90 def standard_normal(size=None):
91 return ivy.random_normal(mean=0.0, std=1.0, shape=size, dtype="float64")
92
93
94 @to_ivy_arrays_and_back
95 @from_zero_dim_arrays_to_scalar
96 def standard_gamma(shape, size=None):
97 return ivy.gamma(shape, 1.0, shape=size, dtype="float64")
98
99
100 @to_ivy_arrays_and_back
101 @from_zero_dim_arrays_to_scalar
102 def binomial(n, p, size=None):
103 if p < 0 or p > 1:
104 raise ValueError("p must be in the interval (0, 1)")
105 if n < 0:
106 raise ValueError("n must be strictly positive")
107 if size is None:
108 size = 1
109 else:
110 size = size
111 if isinstance(size, int):
112 size = (size,)
113 lambda_ = ivy.multiply(n, p)
114 return ivy.poisson(lambda_, shape=size)
115
116
117 @to_ivy_arrays_and_back
118 @from_zero_dim_arrays_to_scalar
119 def chisquare(df, size=None):
120 df = ivy.array(df) # scalar ints and floats are also array_like
121 if ivy.any(df <= 0):
122 raise ValueError("df <= 0")
123
124 # ivy.gamma() throws an error if both alpha is an array and a shape is passed
125 # so this part broadcasts df into the shape of `size`` first to keep it happy.
126 if size is not None:
127 df = df * ivy.ones(size)
128
129 return ivy.gamma(df / 2, 2, dtype="float64")
130
131
132 @to_ivy_arrays_and_back
133 @from_zero_dim_arrays_to_scalar
134 def lognormal(mean=0.0, sigma=1.0, size=None):
135 ret = ivy.exp(ivy.random_normal(mean=mean, std=sigma, shape=size, dtype="float64"))
136 return ret
137
138
139 @to_ivy_arrays_and_back
140 @from_zero_dim_arrays_to_scalar
141 def negative_binomial(n, p, size=None):
142 if p <= 0 or p >= 1:
143 raise ValueError("p must be in the interval (0, 1)")
144 if n <= 0:
145 raise ValueError("n must be strictly positive")
146 # numpy implementation uses scale = (1 - p) / p
147 scale = (1 - p) / p
148 # poisson requires shape to be a tuple
149 if isinstance(size, int):
150 size = (size,)
151 lambda_ = ivy.gamma(n, scale, shape=size)
152 return ivy.poisson(lam=lambda_, shape=size)
153
154
155 @to_ivy_arrays_and_back
156 @from_zero_dim_arrays_to_scalar
157 def weibull(a, size=None):
158 if a < 0:
159 return 0
160 u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
161 return ivy.pow(-ivy.log(1 - u), 1 / a)
162
163
164 @to_ivy_arrays_and_back
165 @from_zero_dim_arrays_to_scalar
166 def standard_cauchy(size=None):
167 u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
168 return ivy.tan(ivy.pi * (u - 0.5))
169
170
171 @to_ivy_arrays_and_back
172 @from_zero_dim_arrays_to_scalar
173 def rayleigh(scale, size=None):
174 u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
175 log_u = ivy.log(u)
176 x = ivy.multiply(scale, ivy.sqrt(ivy.multiply(-2, log_u)))
177 return x
178
179
180 @to_ivy_arrays_and_back
181 @from_zero_dim_arrays_to_scalar
182 def gumbel(loc=0.0, scale=1.0, size=None):
183 u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
184 x = loc - scale * ivy.log(-ivy.log(u))
185 return x
186
187
188 @to_ivy_arrays_and_back
189 @from_zero_dim_arrays_to_scalar
190 def gamma(shape, scale=1.0, size=None):
191 return ivy.gamma(shape, scale, shape=size, dtype="float64")
192
193
194 @to_ivy_arrays_and_back
195 @from_zero_dim_arrays_to_scalar
196 def logistic(loc=0.0, scale=1.0, size=None):
197 u = ivy.random_uniform(low=0.0, high=0.0, shape=size, dtype="float64")
198 x = loc + scale * ivy.log(u / (1 - u))
199 return x
200
[end of ivy/functional/frontends/numpy/random/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/numpy/random/functions.py b/ivy/functional/frontends/numpy/random/functions.py
--- a/ivy/functional/frontends/numpy/random/functions.py
+++ b/ivy/functional/frontends/numpy/random/functions.py
@@ -197,3 +197,19 @@
u = ivy.random_uniform(low=0.0, high=0.0, shape=size, dtype="float64")
x = loc + scale * ivy.log(u / (1 - u))
return x
+
+
+@to_ivy_arrays_and_back
+@from_zero_dim_arrays_to_scalar
+def triangular(left, mode, right, size=None):
+ if left > mode or mode > right or left == right:
+ raise ivy.utils.exceptions.IvyValueError(
+ "left < mode < right is not being followed"
+ )
+ u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype="float64")
+ condition = u <= (mode - left) / (right - left)
+ values1 = left + (right - left) * (u * (mode - left) / (right - left)) ** 0.5
+ values2 = (
+ right - (right - mode) * ((1 - u) * (right - mode) / (right - left)) ** 0.5
+ )
+ return ivy.where(condition, values1, values2)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/random/functions.py b/ivy/functional/frontends/numpy/random/functions.py\n--- a/ivy/functional/frontends/numpy/random/functions.py\n+++ b/ivy/functional/frontends/numpy/random/functions.py\n@@ -197,3 +197,19 @@\n u = ivy.random_uniform(low=0.0, high=0.0, shape=size, dtype=\"float64\")\n x = loc + scale * ivy.log(u / (1 - u))\n return x\n+\n+\n+@to_ivy_arrays_and_back\n+@from_zero_dim_arrays_to_scalar\n+def triangular(left, mode, right, size=None):\n+ if left > mode or mode > right or left == right:\n+ raise ivy.utils.exceptions.IvyValueError(\n+ \"left < mode < right is not being followed\"\n+ )\n+ u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n+ condition = u <= (mode - left) / (right - left)\n+ values1 = left + (right - left) * (u * (mode - left) / (right - left)) ** 0.5\n+ values2 = (\n+ right - (right - mode) * ((1 - u) * (right - mode) / (right - left)) ** 0.5\n+ )\n+ return ivy.where(condition, values1, values2)\n", "issue": "triangular\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef random_sample(size=None):\n return ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef dirichlet(alpha, size=None):\n return ivy.dirichlet(alpha, size=size)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef uniform(low=0.0, high=1.0, size=None):\n return ivy.random_uniform(low=low, high=high, shape=size, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef geometric(p, size=None):\n if p < 0 or p > 1:\n raise ValueError(\"p must be in the interval [0, 1]\")\n oneMinusP = ivy.subtract(1, p)\n sizeMinusOne = ivy.subtract(size, 1)\n\n return ivy.multiply(ivy.pow(oneMinusP, sizeMinusOne), p)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef normal(loc=0.0, scale=1.0, size=None):\n return ivy.random_normal(mean=loc, std=scale, shape=size, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef poisson(lam=1.0, size=None):\n return ivy.poisson(lam=lam, shape=size)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef multinomial(n, pvals, size=None):\n assert not ivy.exists(size) or (len(size) > 0 and len(size) < 3)\n batch_size = 1\n if ivy.exists(size):\n if len(size) == 2:\n batch_size = size[0]\n num_samples = size[1]\n else:\n num_samples = size[0]\n else:\n num_samples = len(pvals)\n return ivy.multinomial(n, num_samples, batch_size=batch_size, probs=pvals)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef permutation(x, /):\n if isinstance(x, int):\n x = ivy.arange(x)\n return ivy.shuffle(x)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef beta(a, b, size=None):\n return ivy.beta(a, b, shape=size)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef shuffle(x, axis=0, /):\n if isinstance(x, int):\n x = ivy.arange(x)\n return ivy.shuffle(x, axis)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef standard_normal(size=None):\n return ivy.random_normal(mean=0.0, std=1.0, shape=size, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef standard_gamma(shape, size=None):\n return ivy.gamma(shape, 1.0, shape=size, 
dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef binomial(n, p, size=None):\n if p < 0 or p > 1:\n raise ValueError(\"p must be in the interval (0, 1)\")\n if n < 0:\n raise ValueError(\"n must be strictly positive\")\n if size is None:\n size = 1\n else:\n size = size\n if isinstance(size, int):\n size = (size,)\n lambda_ = ivy.multiply(n, p)\n return ivy.poisson(lambda_, shape=size)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef chisquare(df, size=None):\n df = ivy.array(df) # scalar ints and floats are also array_like\n if ivy.any(df <= 0):\n raise ValueError(\"df <= 0\")\n\n # ivy.gamma() throws an error if both alpha is an array and a shape is passed\n # so this part broadcasts df into the shape of `size`` first to keep it happy.\n if size is not None:\n df = df * ivy.ones(size)\n\n return ivy.gamma(df / 2, 2, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef lognormal(mean=0.0, sigma=1.0, size=None):\n ret = ivy.exp(ivy.random_normal(mean=mean, std=sigma, shape=size, dtype=\"float64\"))\n return ret\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef negative_binomial(n, p, size=None):\n if p <= 0 or p >= 1:\n raise ValueError(\"p must be in the interval (0, 1)\")\n if n <= 0:\n raise ValueError(\"n must be strictly positive\")\n # numpy implementation uses scale = (1 - p) / p\n scale = (1 - p) / p\n # poisson requires shape to be a tuple\n if isinstance(size, int):\n size = (size,)\n lambda_ = ivy.gamma(n, scale, shape=size)\n return ivy.poisson(lam=lambda_, shape=size)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef weibull(a, size=None):\n if a < 0:\n return 0\n u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n return ivy.pow(-ivy.log(1 - u), 1 / a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef standard_cauchy(size=None):\n u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n return ivy.tan(ivy.pi * (u - 0.5))\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef rayleigh(scale, size=None):\n u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n log_u = ivy.log(u)\n x = ivy.multiply(scale, ivy.sqrt(ivy.multiply(-2, log_u)))\n return x\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef gumbel(loc=0.0, scale=1.0, size=None):\n u = ivy.random_uniform(low=0.0, high=1.0, shape=size, dtype=\"float64\")\n x = loc - scale * ivy.log(-ivy.log(u))\n return x\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef gamma(shape, scale=1.0, size=None):\n return ivy.gamma(shape, scale, shape=size, dtype=\"float64\")\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef logistic(loc=0.0, scale=1.0, size=None):\n u = ivy.random_uniform(low=0.0, high=0.0, shape=size, dtype=\"float64\")\n x = loc + scale * ivy.log(u / (1 - u))\n return x\n", "path": "ivy/functional/frontends/numpy/random/functions.py"}]}
| 2,750 | 330 |
gh_patches_debug_15861
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-366
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TST `backward` for complex modulus is not tested
Specifically, codecov reports that [lines 90-105](https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/backend/backend_torch.py#L90) of `backend_torch.py` are [never executed](https://codecov.io/gh/kymatio/kymatio/src/master/kymatio/scattering1d/backend/backend_torch.py#L90). This is odd because we specifically test the `backward` function in the [corresponding test](https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/tests/test_utils.py#L87). Somehow, torch must be bypassing our code.
</issue>
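One minimal way to exercise the `backward` path directly is a sketch like the following, assuming `ModulusStable` is importable from the backend path quoted in the issue; it drives autograd by hand and compares against the analytic gradient x / |x|:

```
import torch
from kymatio.scattering1d.backend.backend_torch import ModulusStable  # path taken from the issue

x = torch.randn(3, 5, 2, dtype=torch.float64, requires_grad=True)
y = ModulusStable.apply(x)      # forward: modulus along the last axis
y.sum().backward()              # this call must reach ModulusStable.backward
expected = x.detach() / y.detach().unsqueeze(-1)
assert torch.allclose(x.grad, expected)
```

If the lines flagged by codecov really are bypassed, the backward call or the assertion above makes that visible immediately.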
<code>
[start of kymatio/scattering1d/backend/backend_torch.py]
1 # Authors: Edouard Oyallon, Joakim Anden, Mathieu Andreux
2
3 import numpy as np
4 import torch
5 import torch.nn.functional as F
6 from torch.autograd import Function
7
8 NAME = 'torch'
9
10 def is_complex(input):
11 return input.size(-1) == 2
12
13 class ModulusStable(Function):
14 """Stable complex modulus
15
16 This class implements a modulus transform for complex numbers which is
17 stable with respect to very small inputs (z close to 0), avoiding
18 returning nans in all cases.
19
20 Usage
21 -----
22 modulus = ModulusStable.apply # apply inherited from Function
23 x_mod = modulus(x)
24
25 Parameters
26 ---------
27 x : tensor
28 The complex tensor (i.e., whose last dimension is two) whose modulus
29 we want to compute.
30
31 Returns
32 -------
33 output : tensor
34 A tensor of same size as the input tensor, except for the last
35 dimension, which is removed. This tensor is differentiable with respect
36 to the input in a stable fashion (so gradent of the modulus at zero is
37 zero).
38 """
39
40 @staticmethod
41 def forward(ctx, x):
42 """Forward pass of the modulus.
43
44 This is a static method which does not require an instantiation of the
45 class.
46
47 Arguments
48 ---------
49 ctx : context object
50 Collected during the forward pass. These are automatically added
51 by PyTorch and should not be touched. They are then used for the
52 backward pass.
53 x : tensor
54 The complex tensor whose modulus is to be computed.
55
56 Returns
57 -------
58 output : tensor
59 This contains the modulus computed along the last axis, with that
60 axis removed.
61 """
62 ctx.p = 2
63 ctx.dim = -1
64 ctx.keepdim = False
65
66 output = (x[...,0]*x[...,0] + x[...,1]*x[...,1]).sqrt()
67
68 ctx.save_for_backward(x, output)
69 return output
70
71 @staticmethod
72 def backward(ctx, grad_output):
73 """Backward pass of the modulus
74
75 This is a static method which does not require an instantiation of the
76 class.
77
78 Arguments
79 ---------
80 ctx : context object
81 Collected during the forward pass. These are automatically added
82 by PyTorch and should not be touched. They are then used for the
83 backward pass.
84 grad_output : tensor
85 The gradient with respect to the output tensor computed at the
86 forward pass.
87
88 Returns
89 -------
90 grad_input : tensor
91 The gradient with respect to the input.
92 """
93 x, output = ctx.saved_tensors
94 if ctx.dim is not None and ctx.keepdim is False and x.dim() != 1:
95 grad_output = grad_output.unsqueeze(ctx.dim)
96 output = output.unsqueeze(ctx.dim)
97
98 if ctx.p == 2:
99 grad_input = x.mul(grad_output).div(output)
100 else:
101 input_pow = x.abs().pow(ctx.p - 2)
102 output_pow = output.pow(ctx.p - 1)
103 grad_input = x.mul(input_pow).mul(grad_output).div(output_pow)
104
105 # Special case at 0 where we return a subgradient containing 0
106 grad_input.masked_fill_(output == 0, 0)
107
108 return grad_input, None, None, None
109
110 # shortcut for ModulusStable.apply
111 modulus = ModulusStable.apply
112
113 def modulus_complex(x):
114 """Compute the complex modulus
115
116 Computes the modulus of x and stores the result in a complex tensor of the
117 same size, with the real part equal to the modulus and the imaginary part
118 equal to zero.
119
120 Parameters
121 ----------
122 x : tensor
123 A complex tensor (that is, whose last dimension is equal to 2).
124
125 Returns
126 -------
127 res : tensor
128 A tensor with the same dimensions as x, such that res[..., 0] contains
129 the complex modulus of x, while res[..., 1] = 0.
130 """
131 if not is_complex(x):
132 raise TypeError('The input should be complex.')
133
134 norm = modulus(x)
135
136 res = torch.zeros_like(x)
137 res[...,0] = norm
138
139 return res
140
141 def subsample_fourier(x, k):
142 """Subsampling in the Fourier domain
143
144 Subsampling in the temporal domain amounts to periodization in the Fourier
145 domain, so the input is periodized according to the subsampling factor.
146
147 Parameters
148 ----------
149 x : tensor
150 Input tensor with at least 3 dimensions, where the next to last
151 corresponds to the frequency index in the standard PyTorch FFT
152 ordering. The length of this dimension should be a power of 2 to
153 avoid errors. The last dimension should represent the real and
154 imaginary parts of the Fourier transform.
155 k : int
156 The subsampling factor.
157
158 Returns
159 -------
160 res : tensor
161 The input tensor periodized along the next to last axis to yield a
162 tensor of size x.shape[-2] // k along that dimension.
163 """
164 if not is_complex(x):
165 raise TypeError('The input should be complex.')
166
167 N = x.shape[-2]
168 res = x.view(x.shape[:-2] + (k, N // k, 2)).mean(dim=-3)
169 return res
170
171 def pad_1d(x, pad_left, pad_right, mode='constant', value=0.):
172 """Pad real 1D tensors
173
174 1D implementation of the padding function for real PyTorch tensors.
175
176 Parameters
177 ----------
178 x : tensor
179 Three-dimensional input tensor with the third axis being the one to
180 be padded.
181 pad_left : int
182 Amount to add on the left of the tensor (at the beginning of the
183 temporal axis).
184 pad_right : int
185 amount to add on the right of the tensor (at the end of the temporal
186 axis).
187 mode : string, optional
188 Padding mode. Options include 'constant' and 'reflect'. See the
189 PyTorch API for other options. Defaults to 'constant'.
190 value : float, optional
191 If mode == 'constant', value to input within the padding. Defaults to
192 0.
193
194 Returns
195 -------
196 res : tensor
197 The tensor passed along the third dimension.
198 """
199 if (pad_left >= x.shape[-1]) or (pad_right >= x.shape[-1]):
200 if mode == 'reflect':
201 raise ValueError('Indefinite padding size (larger than tensor).')
202 res = F.pad(x.unsqueeze(2),
203 (pad_left, pad_right, 0, 0),
204 mode=mode, value=value).squeeze(2)
205 return res
206
207 def pad(x, pad_left=0, pad_right=0, to_complex=True):
208 """Pad real 1D tensors and map to complex
209
210 Padding which allows to simultaneously pad in a reflection fashion and map
211 to complex if necessary.
212
213 Parameters
214 ----------
215 x : tensor
216 Three-dimensional input tensor with the third axis being the one to
217 be padded.
218 pad_left : int
219 Amount to add on the left of the tensor (at the beginning of the
220 temporal axis).
221 pad_right : int
222 amount to add on the right of the tensor (at the end of the temporal
223 axis).
224 to_complex : boolean, optional
225 Whether to map the resulting padded tensor to a complex type (seen
226 as a real number). Defaults to True.
227
228 Returns
229 -------
230 output : tensor
231 A padded signal, possibly transformed into a four-dimensional tensor
232 with the last axis of size 2 if to_complex is True (this axis
233 corresponds to the real and imaginary parts).
234 """
235 output = pad_1d(x, pad_left, pad_right, mode='reflect')
236 if to_complex:
237 output = torch.stack((output, torch.zeros_like(output)), dim=-1)
238 return output
239
240 def unpad(x, i0, i1):
241 """Unpad real 1D tensor
242
243 Slices the input tensor at indices between i0 and i1 along the last axis.
244
245 Parameters
246 ----------
247 x : tensor
248 Input tensor with least one axis.
249 i0 : int
250 Start of original signal before padding.
251 i1 : int
252 End of original signal before padding.
253
254 Returns
255 -------
256 x_unpadded : tensor
257 The tensor x[..., i0:i1].
258 """
259 return x[..., i0:i1]
260
261 def real(x):
262 """Real part of complex tensor
263
264 Takes the real part of a complex tensor, where the last axis corresponds
265 to the real and imaginary parts.
266
267 Parameters
268 ----------
269 x : tensor
270 A complex tensor (that is, whose last dimension is equal to 2).
271
272 Returns
273 -------
274 x_real : tensor
275 The tensor x[..., 0] which is interpreted as the real part of x.
276 """
277 return x[..., 0]
278
279 def fft1d_c2c(x):
280 """Compute the 1D FFT of a complex signal
281
282 Input
283 -----
284 x : tensor
285 A tensor of size (..., T, 2), where x[..., 0] is the real part and
286 x[..., 1] is the imaginary part.
287
288 Returns
289 -------
290 x_f : tensor
291 A tensor of the same size as x containing its Fourier transform in the
292 standard PyTorch FFT ordering.
293 """
294 return torch.fft(x, signal_ndim=1)
295
296 def ifft1d_c2c(x):
297 """Compute the normalized 1D inverse FFT of a complex signal
298
299 Input
300 -----
301 x_f : tensor
302 A tensor of size (..., T, 2), where x_f[..., 0] is the real part and
303 x[..., 1] is the imaginary part. The frequencies are assumed to be in
304 the standard PyTorch FFT ordering.
305
306 Returns
307 -------
308 x : tensor
309 A tensor of the same size of x_f containing the normalized inverse
310 Fourier transform of x_f.
311 """
312 return torch.ifft(x, signal_ndim=1)
313
[end of kymatio/scattering1d/backend/backend_torch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kymatio/scattering1d/backend/backend_torch.py b/kymatio/scattering1d/backend/backend_torch.py
--- a/kymatio/scattering1d/backend/backend_torch.py
+++ b/kymatio/scattering1d/backend/backend_torch.py
@@ -95,17 +95,12 @@
grad_output = grad_output.unsqueeze(ctx.dim)
output = output.unsqueeze(ctx.dim)
- if ctx.p == 2:
- grad_input = x.mul(grad_output).div(output)
- else:
- input_pow = x.abs().pow(ctx.p - 2)
- output_pow = output.pow(ctx.p - 1)
- grad_input = x.mul(input_pow).mul(grad_output).div(output_pow)
+ grad_input = x.mul(grad_output).div(output)
# Special case at 0 where we return a subgradient containing 0
grad_input.masked_fill_(output == 0, 0)
- return grad_input, None, None, None
+ return grad_input
# shortcut for ModulusStable.apply
modulus = ModulusStable.apply
|
{"golden_diff": "diff --git a/kymatio/scattering1d/backend/backend_torch.py b/kymatio/scattering1d/backend/backend_torch.py\n--- a/kymatio/scattering1d/backend/backend_torch.py\n+++ b/kymatio/scattering1d/backend/backend_torch.py\n@@ -95,17 +95,12 @@\n grad_output = grad_output.unsqueeze(ctx.dim)\n output = output.unsqueeze(ctx.dim)\n \n- if ctx.p == 2:\n- grad_input = x.mul(grad_output).div(output)\n- else:\n- input_pow = x.abs().pow(ctx.p - 2)\n- output_pow = output.pow(ctx.p - 1)\n- grad_input = x.mul(input_pow).mul(grad_output).div(output_pow)\n+ grad_input = x.mul(grad_output).div(output)\n \n # Special case at 0 where we return a subgradient containing 0\n grad_input.masked_fill_(output == 0, 0)\n \n- return grad_input, None, None, None\n+ return grad_input\n \n # shortcut for ModulusStable.apply\n modulus = ModulusStable.apply\n", "issue": "TST `backward` for complex modulus is not tested\nSpecifically, codecov reports that [lines 90-105](https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/backend/backend_torch.py#L90) of `backend_torch.py` are [never executed](https://codecov.io/gh/kymatio/kymatio/src/master/kymatio/scattering1d/backend/backend_torch.py#L90). This is odd because we specifically test the `backward` function in the [corresponding test](https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/tests/test_utils.py#L87). Somehow, torch must be bypassing our code.\r\n\r\n\n", "before_files": [{"content": "# Authors: Edouard Oyallon, Joakim Anden, Mathieu Andreux\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Function\n\nNAME = 'torch'\n\ndef is_complex(input):\n return input.size(-1) == 2\n\nclass ModulusStable(Function):\n \"\"\"Stable complex modulus\n\n This class implements a modulus transform for complex numbers which is\n stable with respect to very small inputs (z close to 0), avoiding\n returning nans in all cases.\n\n Usage\n -----\n modulus = ModulusStable.apply # apply inherited from Function\n x_mod = modulus(x)\n\n Parameters\n ---------\n x : tensor\n The complex tensor (i.e., whose last dimension is two) whose modulus\n we want to compute.\n\n Returns\n -------\n output : tensor\n A tensor of same size as the input tensor, except for the last\n dimension, which is removed. This tensor is differentiable with respect\n to the input in a stable fashion (so gradent of the modulus at zero is\n zero).\n \"\"\"\n\n @staticmethod\n def forward(ctx, x):\n \"\"\"Forward pass of the modulus.\n\n This is a static method which does not require an instantiation of the\n class.\n\n Arguments\n ---------\n ctx : context object\n Collected during the forward pass. These are automatically added\n by PyTorch and should not be touched. They are then used for the\n backward pass.\n x : tensor\n The complex tensor whose modulus is to be computed.\n\n Returns\n -------\n output : tensor\n This contains the modulus computed along the last axis, with that\n axis removed.\n \"\"\"\n ctx.p = 2\n ctx.dim = -1\n ctx.keepdim = False\n\n output = (x[...,0]*x[...,0] + x[...,1]*x[...,1]).sqrt()\n\n ctx.save_for_backward(x, output)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"Backward pass of the modulus\n\n This is a static method which does not require an instantiation of the\n class.\n\n Arguments\n ---------\n ctx : context object\n Collected during the forward pass. These are automatically added\n by PyTorch and should not be touched. 
They are then used for the\n backward pass.\n grad_output : tensor\n The gradient with respect to the output tensor computed at the\n forward pass.\n\n Returns\n -------\n grad_input : tensor\n The gradient with respect to the input.\n \"\"\"\n x, output = ctx.saved_tensors\n if ctx.dim is not None and ctx.keepdim is False and x.dim() != 1:\n grad_output = grad_output.unsqueeze(ctx.dim)\n output = output.unsqueeze(ctx.dim)\n\n if ctx.p == 2:\n grad_input = x.mul(grad_output).div(output)\n else:\n input_pow = x.abs().pow(ctx.p - 2)\n output_pow = output.pow(ctx.p - 1)\n grad_input = x.mul(input_pow).mul(grad_output).div(output_pow)\n\n # Special case at 0 where we return a subgradient containing 0\n grad_input.masked_fill_(output == 0, 0)\n\n return grad_input, None, None, None\n\n# shortcut for ModulusStable.apply\nmodulus = ModulusStable.apply\n\ndef modulus_complex(x):\n \"\"\"Compute the complex modulus\n\n Computes the modulus of x and stores the result in a complex tensor of the\n same size, with the real part equal to the modulus and the imaginary part\n equal to zero.\n\n Parameters\n ----------\n x : tensor\n A complex tensor (that is, whose last dimension is equal to 2).\n\n Returns\n -------\n res : tensor\n A tensor with the same dimensions as x, such that res[..., 0] contains\n the complex modulus of x, while res[..., 1] = 0.\n \"\"\"\n if not is_complex(x):\n raise TypeError('The input should be complex.')\n\n norm = modulus(x)\n\n res = torch.zeros_like(x)\n res[...,0] = norm\n\n return res\n\ndef subsample_fourier(x, k):\n \"\"\"Subsampling in the Fourier domain\n\n Subsampling in the temporal domain amounts to periodization in the Fourier\n domain, so the input is periodized according to the subsampling factor.\n\n Parameters\n ----------\n x : tensor\n Input tensor with at least 3 dimensions, where the next to last\n corresponds to the frequency index in the standard PyTorch FFT\n ordering. The length of this dimension should be a power of 2 to\n avoid errors. The last dimension should represent the real and\n imaginary parts of the Fourier transform.\n k : int\n The subsampling factor.\n\n Returns\n -------\n res : tensor\n The input tensor periodized along the next to last axis to yield a\n tensor of size x.shape[-2] // k along that dimension.\n \"\"\"\n if not is_complex(x):\n raise TypeError('The input should be complex.')\n\n N = x.shape[-2]\n res = x.view(x.shape[:-2] + (k, N // k, 2)).mean(dim=-3)\n return res\n\ndef pad_1d(x, pad_left, pad_right, mode='constant', value=0.):\n \"\"\"Pad real 1D tensors\n\n 1D implementation of the padding function for real PyTorch tensors.\n\n Parameters\n ----------\n x : tensor\n Three-dimensional input tensor with the third axis being the one to\n be padded.\n pad_left : int\n Amount to add on the left of the tensor (at the beginning of the\n temporal axis).\n pad_right : int\n amount to add on the right of the tensor (at the end of the temporal\n axis).\n mode : string, optional\n Padding mode. Options include 'constant' and 'reflect'. See the\n PyTorch API for other options. Defaults to 'constant'.\n value : float, optional\n If mode == 'constant', value to input within the padding. 
Defaults to\n 0.\n\n Returns\n -------\n res : tensor\n The tensor passed along the third dimension.\n \"\"\"\n if (pad_left >= x.shape[-1]) or (pad_right >= x.shape[-1]):\n if mode == 'reflect':\n raise ValueError('Indefinite padding size (larger than tensor).')\n res = F.pad(x.unsqueeze(2),\n (pad_left, pad_right, 0, 0),\n mode=mode, value=value).squeeze(2)\n return res\n\ndef pad(x, pad_left=0, pad_right=0, to_complex=True):\n \"\"\"Pad real 1D tensors and map to complex\n\n Padding which allows to simultaneously pad in a reflection fashion and map\n to complex if necessary.\n\n Parameters\n ----------\n x : tensor\n Three-dimensional input tensor with the third axis being the one to\n be padded.\n pad_left : int\n Amount to add on the left of the tensor (at the beginning of the\n temporal axis).\n pad_right : int\n amount to add on the right of the tensor (at the end of the temporal\n axis).\n to_complex : boolean, optional\n Whether to map the resulting padded tensor to a complex type (seen\n as a real number). Defaults to True.\n\n Returns\n -------\n output : tensor\n A padded signal, possibly transformed into a four-dimensional tensor\n with the last axis of size 2 if to_complex is True (this axis\n corresponds to the real and imaginary parts).\n \"\"\"\n output = pad_1d(x, pad_left, pad_right, mode='reflect')\n if to_complex:\n output = torch.stack((output, torch.zeros_like(output)), dim=-1)\n return output\n\ndef unpad(x, i0, i1):\n \"\"\"Unpad real 1D tensor\n\n Slices the input tensor at indices between i0 and i1 along the last axis.\n\n Parameters\n ----------\n x : tensor\n Input tensor with least one axis.\n i0 : int\n Start of original signal before padding.\n i1 : int\n End of original signal before padding.\n\n Returns\n -------\n x_unpadded : tensor\n The tensor x[..., i0:i1].\n \"\"\"\n return x[..., i0:i1]\n\ndef real(x):\n \"\"\"Real part of complex tensor\n\n Takes the real part of a complex tensor, where the last axis corresponds\n to the real and imaginary parts.\n\n Parameters\n ----------\n x : tensor\n A complex tensor (that is, whose last dimension is equal to 2).\n\n Returns\n -------\n x_real : tensor\n The tensor x[..., 0] which is interpreted as the real part of x.\n \"\"\"\n return x[..., 0]\n\ndef fft1d_c2c(x):\n \"\"\"Compute the 1D FFT of a complex signal\n\n Input\n -----\n x : tensor\n A tensor of size (..., T, 2), where x[..., 0] is the real part and\n x[..., 1] is the imaginary part.\n\n Returns\n -------\n x_f : tensor\n A tensor of the same size as x containing its Fourier transform in the\n standard PyTorch FFT ordering.\n \"\"\"\n return torch.fft(x, signal_ndim=1)\n\ndef ifft1d_c2c(x):\n \"\"\"Compute the normalized 1D inverse FFT of a complex signal\n\n Input\n -----\n x_f : tensor\n A tensor of size (..., T, 2), where x_f[..., 0] is the real part and\n x[..., 1] is the imaginary part. The frequencies are assumed to be in\n the standard PyTorch FFT ordering.\n\n Returns\n -------\n x : tensor\n A tensor of the same size of x_f containing the normalized inverse\n Fourier transform of x_f.\n \"\"\"\n return torch.ifft(x, signal_ndim=1)\n", "path": "kymatio/scattering1d/backend/backend_torch.py"}]}
| 3,773 | 247 |
gh_patches_debug_24388
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-4190
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
</issue>
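The full patch appears later in this record; its core idea is to treat `ProtectedError` as a reason to skip an artifact rather than fail the whole task. A condensed sketch of that loop inside `reclaim_space`:

```
from logging import getLogger

from django.db.models.deletion import ProtectedError

log = getLogger(__name__)

for artifact in artifacts_to_delete.iterator():
    try:
        # delete() is called manually because it also removes the file from storage
        artifact.delete()
    except ProtectedError as e:
        # the artifact is still referenced by another ContentArtifact; log and move on
        log.info(e)
    else:
        progress_bar.done += 1
        counter += 1
```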
<code>
[start of pulpcore/app/tasks/reclaim_space.py]
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10 from pulpcore.app.util import get_domain
11
12
13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
14 """
15 This task frees-up disk space by removing Artifact files from the filesystem for Content
16 exclusive to the list of provided repos.
17
18 Note: content marked as `proctected` will be excluded from the reclaim disk space.
19
20 Kwargs:
21 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
23 disk space.
24 force (bool): If True, uploaded content will be taken into account.
25
26 """
27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
28 for repo in reclaimed_repos:
29 repo.invalidate_cache(everything=True)
30
31 domain = get_domain()
32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)
33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
35 c_reclaim_qs = c_reclaim_qs.exclude(
36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
37 )
38
39 if keeplist_rv_pks:
40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
41 rv_content = Content.objects.none()
42 for rv in rv_qs.iterator():
43 rv_content |= rv.content
44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
45
46 content_distinct = c_reclaim_qs.distinct("pulp_type")
47 unprotected = []
48 for content in content_distinct:
49 if not content.cast().PROTECTED_FROM_RECLAIM:
50 unprotected.append(content.pulp_type)
51
52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
54 )
55 if not force:
56 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
57 artifact_pks = set()
58 ca_to_update = []
59 for ca in ca_qs.iterator():
60 if ca.content.pulp_type in unprotected:
61 artifact_pks.add(ca.artifact.pk)
62 ca.artifact = None
63 ca_to_update.append(ca)
64
65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
67 progress_bar = ProgressReport(
68 message="Reclaim disk space",
69 total=artifacts_to_delete.count(),
70 code="reclaim-space.artifact",
71 done=0,
72 state="running",
73 )
74 progress_bar.save()
75
76 counter = 0
77 interval = 100
78 for artifact in artifacts_to_delete.iterator():
79 # we need to manually call delete() because it cleans up the file on the filesystem
80 artifact.delete()
81 progress_bar.done += 1
82 counter += 1
83
84 if counter >= interval:
85 progress_bar.save()
86 counter = 0
87
88 progress_bar.state = "completed"
89 progress_bar.save()
90
[end of pulpcore/app/tasks/reclaim_space.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
|
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
| 1,627 | 322 |
gh_patches_debug_31582
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-3131
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using of wrong Money constructor on displaying order info in dashboard
### What I'm trying to achieve
I've tried to add refunds / captures (using the dummy provider's preauth. payment)
### Steps to reproduce the problem
1. Create payment using dummy provider
2. Preauth. payment
3. Make payment refunds and captures in dashboard
4. Open order in dashboard
### What I expected to happen
I expected to see the order page in the dashboard, but instead I got `conversion from dict to Decimal is not supported` on the order's page.
I guess the root of the problem is here (from the Django debugger information):
Line: `/app/saleor/order/__init__.py : 134-135` in display_order_event
Code:
```
amount = Money(
amount=params['amount'], currency=settings.DEFAULT_CURRENCY)
```
Local vars:
```
event_type | 'captured'
order_event | OrderEvent(type='captured', user=<User: ***@***>)
params | {'amount': {'_type': 'Money', 'amount': '500', 'currency': 'USD'}}
```
Possible solution:
```
amount = Money(
amount=params['amount']['amount'], currency=params['amount']['currency'])
```
**System information**
Operating system: ubuntu 16.04
Browser: chrome
</issue>
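The golden patch further down in this record turns the reporter's "Possible solution" into a small helper that accepts the newer dict serialization as well as legacy values; condensed from that diff:

```
from django.conf import settings
from prices import Money

def get_money_from_params(amount):
    # newer events serialize Money as {'_type': 'Money', 'amount': ..., 'currency': ...}
    if isinstance(amount, Money):
        return amount
    if isinstance(amount, dict):
        return Money(amount=amount['amount'], currency=amount['currency'])
    # legacy events stored a bare number, so fall back to the default currency
    return Money(amount, settings.DEFAULT_CURRENCY)
```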
<code>
[start of saleor/order/__init__.py]
1 from enum import Enum
2
3 from django.apps import AppConfig
4 from django.conf import settings
5 from django.utils.translation import npgettext_lazy, pgettext_lazy
6 from django_prices.templatetags import prices_i18n
7 from prices import Money
8
9
10 class OrderAppConfig(AppConfig):
11 name = 'saleor.order'
12
13 def ready(self):
14 from payments.signals import status_changed
15 from .signals import order_status_change
16 status_changed.connect(order_status_change)
17
18
19 class OrderStatus:
20 DRAFT = 'draft'
21 UNFULFILLED = 'unfulfilled'
22 PARTIALLY_FULFILLED = 'partially fulfilled'
23 FULFILLED = 'fulfilled'
24 CANCELED = 'canceled'
25
26 CHOICES = [
27 (DRAFT, pgettext_lazy(
28 'Status for a fully editable, not confirmed order created by '
29 'staff users',
30 'Draft')),
31 (UNFULFILLED, pgettext_lazy(
32 'Status for an order with any items marked as fulfilled',
33 'Unfulfilled')),
34 (PARTIALLY_FULFILLED, pgettext_lazy(
35 'Status for an order with some items marked as fulfilled',
36 'Partially fulfilled')),
37 (FULFILLED, pgettext_lazy(
38 'Status for an order with all items marked as fulfilled',
39 'Fulfilled')),
40 (CANCELED, pgettext_lazy(
41 'Status for a permanently canceled order',
42 'Canceled'))]
43
44
45 class FulfillmentStatus:
46 FULFILLED = 'fulfilled'
47 CANCELED = 'canceled'
48
49 CHOICES = [
50 (FULFILLED, pgettext_lazy(
51 'Status for a group of products in an order marked as fulfilled',
52 'Fulfilled')),
53 (CANCELED, pgettext_lazy(
54 'Status for a fulfilled group of products in an order marked '
55 'as canceled',
56 'Canceled'))]
57
58
59 class CustomPaymentChoices:
60 MANUAL = 'manual'
61
62 CHOICES = [
63 (MANUAL, pgettext_lazy('Custom payment choice type', 'Manual'))]
64
65
66 class OrderEvents(Enum):
67 PLACED = 'placed'
68 PLACED_FROM_DRAFT = 'draft_placed'
69 OVERSOLD_ITEMS = 'oversold_items'
70 ORDER_MARKED_AS_PAID = 'marked_as_paid'
71 CANCELED = 'canceled'
72 ORDER_FULLY_PAID = 'order_paid'
73 UPDATED = 'updated'
74
75 EMAIL_SENT = 'email_sent'
76
77 PAYMENT_CAPTURED = 'captured'
78 PAYMENT_REFUNDED = 'refunded'
79 PAYMENT_RELEASED = 'released'
80
81 FULFILLMENT_CANCELED = 'fulfillment_canceled'
82 FULFILLMENT_RESTOCKED_ITEMS = 'restocked_items'
83 FULFILLMENT_FULFILLED_ITEMS = 'fulfilled_items'
84 TRACKING_UPDATED = 'tracking_updated'
85 NOTE_ADDED = 'note_added'
86
87 # Used mostly for importing legacy data from before Enum-based events
88 OTHER = 'other'
89
90
91 class OrderEventsEmails(Enum):
92 PAYMENT = 'payment_confirmation'
93 SHIPPING = 'shipping_confirmation'
94 ORDER = 'order_confirmation'
95 FULFILLMENT = 'fulfillment_confirmation'
96
97
98 EMAIL_CHOICES = {
99 OrderEventsEmails.PAYMENT.value: pgettext_lazy(
100 'Email type', 'Payment confirmation'),
101 OrderEventsEmails.SHIPPING.value: pgettext_lazy(
102 'Email type', 'Shipping confirmation'),
103 OrderEventsEmails.FULFILLMENT.value: pgettext_lazy(
104 'Email type', 'Fulfillment confirmation'),
105 OrderEventsEmails.ORDER.value: pgettext_lazy(
106 'Email type', 'Order confirmation')}
107
108
109 def display_order_event(order_event):
110 """This function is used to keep the backwards compatibility
111 with the old dashboard and new type of order events
112 (storing enums instead of messages)
113 """
114 event_type = order_event.type
115 params = order_event.parameters
116 if event_type == OrderEvents.PLACED_FROM_DRAFT.value:
117 return pgettext_lazy(
118 'Dashboard message related to an order',
119 'Order created from draft order by %(user_name)s' % {
120 'user_name': order_event.user})
121 if event_type == OrderEvents.PAYMENT_RELEASED.value:
122 return pgettext_lazy(
123 'Dashboard message related to an order',
124 'Payment was released by %(user_name)s' % {
125 'user_name': order_event.user})
126 if event_type == OrderEvents.PAYMENT_REFUNDED.value:
127 amount = Money(
128 amount=params['amount'], currency=settings.DEFAULT_CURRENCY)
129 return pgettext_lazy(
130 'Dashboard message related to an order',
131 'Successfully refunded: %(amount)s' % {
132 'amount': prices_i18n.amount(amount)})
133 if event_type == OrderEvents.PAYMENT_CAPTURED.value:
134 amount = Money(
135 amount=params['amount'], currency=settings.DEFAULT_CURRENCY)
136 return pgettext_lazy(
137 'Dashboard message related to an order',
138 'Successfully captured: %(amount)s' % {
139 'amount': prices_i18n.amount(amount)})
140 if event_type == OrderEvents.ORDER_MARKED_AS_PAID.value:
141 return pgettext_lazy(
142 'Dashboard message related to an order',
143 'Order manually marked as paid by %(user_name)s' % {
144 'user_name': order_event.user})
145 if event_type == OrderEvents.CANCELED.value:
146 return pgettext_lazy(
147 'Dashboard message related to an order',
148 'Order was canceled by %(user_name)s' % {
149 'user_name': order_event.user})
150 if event_type == OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value:
151 return npgettext_lazy(
152 'Dashboard message related to an order',
153 'We restocked %(quantity)d item',
154 'We restocked %(quantity)d items',
155 'quantity') % {'quantity': params['quantity']}
156 if event_type == OrderEvents.NOTE_ADDED.value:
157 return pgettext_lazy(
158 'Dashboard message related to an order',
159 '%(user_name)s added note: %(note)s' % {
160 'note': params['message'],
161 'user_name': order_event.user})
162 if event_type == OrderEvents.FULFILLMENT_CANCELED.value:
163 return pgettext_lazy(
164 'Dashboard message',
165 'Fulfillment #%(fulfillment)s canceled by %(user_name)s') % {
166 'fulfillment': params['composed_id'],
167 'user_name': order_event.user}
168 if event_type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS.value:
169 return npgettext_lazy(
170 'Dashboard message related to an order',
171 'Fulfilled %(quantity_fulfilled)d item',
172 'Fulfilled %(quantity_fulfilled)d items',
173 'quantity_fulfilled') % {
174 'quantity_fulfilled': params['quantity']}
175 if event_type == OrderEvents.PLACED.value:
176 return pgettext_lazy(
177 'Dashboard message related to an order',
178 'Order was placed')
179 if event_type == OrderEvents.ORDER_FULLY_PAID.value:
180 return pgettext_lazy(
181 'Dashboard message related to an order',
182 'Order was fully paid')
183 if event_type == OrderEvents.EMAIL_SENT.value:
184 return pgettext_lazy(
185 'Dashboard message related to an order',
186 '%(email_type)s email was sent to the customer '
187 '(%(email)s)') % {
188 'email_type': EMAIL_CHOICES[params['email_type']],
189 'email': params['email']}
190 if event_type == OrderEvents.UPDATED.value:
191 return pgettext_lazy(
192 'Dashboard message related to an order',
193 'Order details were updated by %(user_name)s' % {
194 'user_name': order_event.user})
195 if event_type == OrderEvents.TRACKING_UPDATED.value:
196 return pgettext_lazy(
197 'Dashboard message related to an order',
198 'Fulfillment #%(fulfillment)s tracking was updated to'
199 ' %(tracking_number)s by %(user_name)s') % {
200 'fulfillment': params['composed_id'],
201 'tracking_number': params['tracking_number'],
202 'user_name': order_event.user}
203 if event_type == OrderEvents.OVERSOLD_ITEMS.value:
204 return npgettext_lazy(
205 'Dashboard message related to an order',
206 '%(quantity)d line item oversold on this order.',
207 '%(quantity)d line items oversold on this order.') % {
208 'quantity': len(params['oversold_items'])}
209
210 if event_type == OrderEvents.OTHER.value:
211 return order_event.parameters['message']
212 raise ValueError('Not supported event type: %s' % (event_type))
213
[end of saleor/order/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/order/__init__.py b/saleor/order/__init__.py
--- a/saleor/order/__init__.py
+++ b/saleor/order/__init__.py
@@ -106,6 +106,19 @@
'Email type', 'Order confirmation')}
+def get_money_from_params(amount):
+ """Money serialization changed at one point, as for now it's serialized
+ as a dict. But we keep those settings for the legacy data.
+
+ Can be safely removed after migrating to Dashboard 2.0
+ """
+ if isinstance(amount, Money):
+ return amount
+ if isinstance(amount, dict):
+ return Money(amount=amount['amount'], currency=amount['currency'])
+ return Money(amount, settings.DEFAULT_CURRENCY)
+
+
def display_order_event(order_event):
"""This function is used to keep the backwards compatibility
with the old dashboard and new type of order events
@@ -124,15 +137,13 @@
'Payment was released by %(user_name)s' % {
'user_name': order_event.user})
if event_type == OrderEvents.PAYMENT_REFUNDED.value:
- amount = Money(
- amount=params['amount'], currency=settings.DEFAULT_CURRENCY)
+ amount = get_money_from_params(params['amount'])
return pgettext_lazy(
'Dashboard message related to an order',
'Successfully refunded: %(amount)s' % {
'amount': prices_i18n.amount(amount)})
if event_type == OrderEvents.PAYMENT_CAPTURED.value:
- amount = Money(
- amount=params['amount'], currency=settings.DEFAULT_CURRENCY)
+ amount = get_money_from_params(params['amount'])
return pgettext_lazy(
'Dashboard message related to an order',
'Successfully captured: %(amount)s' % {
|
{"golden_diff": "diff --git a/saleor/order/__init__.py b/saleor/order/__init__.py\n--- a/saleor/order/__init__.py\n+++ b/saleor/order/__init__.py\n@@ -106,6 +106,19 @@\n 'Email type', 'Order confirmation')}\n \n \n+def get_money_from_params(amount):\n+ \"\"\"Money serialization changed at one point, as for now it's serialized\n+ as a dict. But we keep those settings for the legacy data.\n+\n+ Can be safely removed after migrating to Dashboard 2.0\n+ \"\"\"\n+ if isinstance(amount, Money):\n+ return amount\n+ if isinstance(amount, dict):\n+ return Money(amount=amount['amount'], currency=amount['currency'])\n+ return Money(amount, settings.DEFAULT_CURRENCY)\n+\n+\n def display_order_event(order_event):\n \"\"\"This function is used to keep the backwards compatibility\n with the old dashboard and new type of order events\n@@ -124,15 +137,13 @@\n 'Payment was released by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.PAYMENT_REFUNDED.value:\n- amount = Money(\n- amount=params['amount'], currency=settings.DEFAULT_CURRENCY)\n+ amount = get_money_from_params(params['amount'])\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Successfully refunded: %(amount)s' % {\n 'amount': prices_i18n.amount(amount)})\n if event_type == OrderEvents.PAYMENT_CAPTURED.value:\n- amount = Money(\n- amount=params['amount'], currency=settings.DEFAULT_CURRENCY)\n+ amount = get_money_from_params(params['amount'])\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Successfully captured: %(amount)s' % {\n", "issue": "Using of wrong Money constructor on displaying order info in dashboard\n### What I'm trying to achieve\r\nI've tried to add refunds / captures (using dummy provider's preauth. payment)\r\n\r\n### Steps to reproduce the problem\r\n1. Create payment using dummy provider\r\n2. Preauth. payment\r\n3. Make payment refunds and captures in dashboard\r\n4. 
Open order in dashboard\r\n\r\n### What I expected to happen\r\nI expected to see order page in dashboard, but I've got `conversion from dict to Decimal is not supported` on order's page\r\nI guess the root of the problem is (from Django debugger information):\r\nLine: `/app/saleor/order/__init__.py : 134-135` in display_order_event\r\n\r\nCode:\r\n```\r\namount = Money(\r\n amount=params['amount'], currency=settings.DEFAULT_CURRENCY) \r\n```\r\n\r\nLocal vars:\r\n```\r\nevent_type | 'captured'\r\norder_event | OrderEvent(type='captured', user=<User: ***@***>)\r\nparams | {'amount': {'_type': 'Money', 'amount': '500', 'currency': 'USD'}}\r\n```\r\n\r\nPossible solution:\r\n```\r\namount = Money(\r\n amount=params['amount']['amount'], currency=params['amount']['currency']) \r\n```\r\n\r\n**System information**\r\nOperating system: ubuntu 16.04\r\nBrowser: chrome\r\n\n", "before_files": [{"content": "from enum import Enum\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\nfrom django.utils.translation import npgettext_lazy, pgettext_lazy\nfrom django_prices.templatetags import prices_i18n\nfrom prices import Money\n\n\nclass OrderAppConfig(AppConfig):\n name = 'saleor.order'\n\n def ready(self):\n from payments.signals import status_changed\n from .signals import order_status_change\n status_changed.connect(order_status_change)\n\n\nclass OrderStatus:\n DRAFT = 'draft'\n UNFULFILLED = 'unfulfilled'\n PARTIALLY_FULFILLED = 'partially fulfilled'\n FULFILLED = 'fulfilled'\n CANCELED = 'canceled'\n\n CHOICES = [\n (DRAFT, pgettext_lazy(\n 'Status for a fully editable, not confirmed order created by '\n 'staff users',\n 'Draft')),\n (UNFULFILLED, pgettext_lazy(\n 'Status for an order with any items marked as fulfilled',\n 'Unfulfilled')),\n (PARTIALLY_FULFILLED, pgettext_lazy(\n 'Status for an order with some items marked as fulfilled',\n 'Partially fulfilled')),\n (FULFILLED, pgettext_lazy(\n 'Status for an order with all items marked as fulfilled',\n 'Fulfilled')),\n (CANCELED, pgettext_lazy(\n 'Status for a permanently canceled order',\n 'Canceled'))]\n\n\nclass FulfillmentStatus:\n FULFILLED = 'fulfilled'\n CANCELED = 'canceled'\n\n CHOICES = [\n (FULFILLED, pgettext_lazy(\n 'Status for a group of products in an order marked as fulfilled',\n 'Fulfilled')),\n (CANCELED, pgettext_lazy(\n 'Status for a fulfilled group of products in an order marked '\n 'as canceled',\n 'Canceled'))]\n\n\nclass CustomPaymentChoices:\n MANUAL = 'manual'\n\n CHOICES = [\n (MANUAL, pgettext_lazy('Custom payment choice type', 'Manual'))]\n\n\nclass OrderEvents(Enum):\n PLACED = 'placed'\n PLACED_FROM_DRAFT = 'draft_placed'\n OVERSOLD_ITEMS = 'oversold_items'\n ORDER_MARKED_AS_PAID = 'marked_as_paid'\n CANCELED = 'canceled'\n ORDER_FULLY_PAID = 'order_paid'\n UPDATED = 'updated'\n\n EMAIL_SENT = 'email_sent'\n\n PAYMENT_CAPTURED = 'captured'\n PAYMENT_REFUNDED = 'refunded'\n PAYMENT_RELEASED = 'released'\n\n FULFILLMENT_CANCELED = 'fulfillment_canceled'\n FULFILLMENT_RESTOCKED_ITEMS = 'restocked_items'\n FULFILLMENT_FULFILLED_ITEMS = 'fulfilled_items'\n TRACKING_UPDATED = 'tracking_updated'\n NOTE_ADDED = 'note_added'\n\n # Used mostly for importing legacy data from before Enum-based events\n OTHER = 'other'\n\n\nclass OrderEventsEmails(Enum):\n PAYMENT = 'payment_confirmation'\n SHIPPING = 'shipping_confirmation'\n ORDER = 'order_confirmation'\n FULFILLMENT = 'fulfillment_confirmation'\n\n\nEMAIL_CHOICES = {\n OrderEventsEmails.PAYMENT.value: pgettext_lazy(\n 'Email type', 'Payment 
confirmation'),\n OrderEventsEmails.SHIPPING.value: pgettext_lazy(\n 'Email type', 'Shipping confirmation'),\n OrderEventsEmails.FULFILLMENT.value: pgettext_lazy(\n 'Email type', 'Fulfillment confirmation'),\n OrderEventsEmails.ORDER.value: pgettext_lazy(\n 'Email type', 'Order confirmation')}\n\n\ndef display_order_event(order_event):\n \"\"\"This function is used to keep the backwards compatibility\n with the old dashboard and new type of order events\n (storing enums instead of messages)\n \"\"\"\n event_type = order_event.type\n params = order_event.parameters\n if event_type == OrderEvents.PLACED_FROM_DRAFT.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order created from draft order by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.PAYMENT_RELEASED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Payment was released by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.PAYMENT_REFUNDED.value:\n amount = Money(\n amount=params['amount'], currency=settings.DEFAULT_CURRENCY)\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Successfully refunded: %(amount)s' % {\n 'amount': prices_i18n.amount(amount)})\n if event_type == OrderEvents.PAYMENT_CAPTURED.value:\n amount = Money(\n amount=params['amount'], currency=settings.DEFAULT_CURRENCY)\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Successfully captured: %(amount)s' % {\n 'amount': prices_i18n.amount(amount)})\n if event_type == OrderEvents.ORDER_MARKED_AS_PAID.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order manually marked as paid by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.CANCELED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order was canceled by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value:\n return npgettext_lazy(\n 'Dashboard message related to an order',\n 'We restocked %(quantity)d item',\n 'We restocked %(quantity)d items',\n 'quantity') % {'quantity': params['quantity']}\n if event_type == OrderEvents.NOTE_ADDED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n '%(user_name)s added note: %(note)s' % {\n 'note': params['message'],\n 'user_name': order_event.user})\n if event_type == OrderEvents.FULFILLMENT_CANCELED.value:\n return pgettext_lazy(\n 'Dashboard message',\n 'Fulfillment #%(fulfillment)s canceled by %(user_name)s') % {\n 'fulfillment': params['composed_id'],\n 'user_name': order_event.user}\n if event_type == OrderEvents.FULFILLMENT_FULFILLED_ITEMS.value:\n return npgettext_lazy(\n 'Dashboard message related to an order',\n 'Fulfilled %(quantity_fulfilled)d item',\n 'Fulfilled %(quantity_fulfilled)d items',\n 'quantity_fulfilled') % {\n 'quantity_fulfilled': params['quantity']}\n if event_type == OrderEvents.PLACED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order was placed')\n if event_type == OrderEvents.ORDER_FULLY_PAID.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order was fully paid')\n if event_type == OrderEvents.EMAIL_SENT.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n '%(email_type)s email was sent to the customer '\n '(%(email)s)') % {\n 'email_type': EMAIL_CHOICES[params['email_type']],\n 'email': params['email']}\n if event_type == 
OrderEvents.UPDATED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Order details were updated by %(user_name)s' % {\n 'user_name': order_event.user})\n if event_type == OrderEvents.TRACKING_UPDATED.value:\n return pgettext_lazy(\n 'Dashboard message related to an order',\n 'Fulfillment #%(fulfillment)s tracking was updated to'\n ' %(tracking_number)s by %(user_name)s') % {\n 'fulfillment': params['composed_id'],\n 'tracking_number': params['tracking_number'],\n 'user_name': order_event.user}\n if event_type == OrderEvents.OVERSOLD_ITEMS.value:\n return npgettext_lazy(\n 'Dashboard message related to an order',\n '%(quantity)d line item oversold on this order.',\n '%(quantity)d line items oversold on this order.') % {\n 'quantity': len(params['oversold_items'])}\n\n if event_type == OrderEvents.OTHER.value:\n return order_event.parameters['message']\n raise ValueError('Not supported event type: %s' % (event_type))\n", "path": "saleor/order/__init__.py"}]}
| 3,194 | 404 |
gh_patches_debug_35421
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-20134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[doxygen] Model iconv dependency as a conan package
Specify library name and version: **doxygen/1.9.2**
This resolves an issue where `iconv` wasn't being appropriately modelled as a Conan dependency in the doxygen recipe, which led to unresolved symbol errors on macOS builds with Conan 2. This was previously part of https://github.com/conan-io/conan-center-index/pull/18415, but is being split into a separate PR to separate concerns and make review easier.
Closes #19903
---
- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).
- [x] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).
- [x] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.
</issue>
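For orientation, declaring iconv as an explicit Conan requirement normally comes down to one extra `self.requires()` call in the recipe's `requirements()` method. The sketch below is illustrative only: the `libiconv/1.17` reference and the idea of guarding it behind the recipe's existing options are assumptions, not a statement of how this recipe was actually changed.
```
# Hypothetical excerpt of a conanfile.py; the reference and guards are assumed.
def requirements(self):
    if self.options.enable_search:
        self.requires("xapian-core/1.4.19")
        self.requires("zlib/1.2.13")
    # Model iconv as a proper Conan dependency instead of relying on the
    # system-provided library, which is what leads to unresolved symbols.
    self.requires("libiconv/1.17")
```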
<code>
[start of recipes/doxygen/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
4 from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
5 from conan.tools.microsoft import check_min_vs, is_msvc_static_runtime
6 from conan.tools.scm import Version
7 import os
8
9 required_conan_version = ">=1.52.0"
10
11
12 class DoxygenConan(ConanFile):
13 name = "doxygen"
14 description = "A documentation system for C++, C, Java, IDL and PHP --- Note: Dot is disabled in this package"
15 topics = ("installer", "devtool", "documentation")
16 homepage = "https://github.com/doxygen/doxygen"
17 license = "GPL-2.0-or-later"
18 url = "https://github.com/conan-io/conan-center-index"
19 package_type = "application"
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "enable_parse": [True, False],
23 "enable_search": [True, False],
24 }
25 default_options = {
26 "enable_parse": True,
27 "enable_search": True,
28 }
29
30 @property
31 def _settings_build(self):
32 return getattr(self, "settings_build", self.settings)
33
34 @property
35 def _minimum_compiler_version(self):
36 if Version(self.version) <= "1.9.1":
37 return {
38 "gcc": "5",
39 }
40 return {
41 "gcc": "7", # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297
42 "Visual Studio": "15",
43 "msvc": "191",
44 }
45
46 def export_sources(self):
47 export_conandata_patches(self)
48
49 def layout(self):
50 cmake_layout(self, src_folder="src")
51
52 def requirements(self):
53 if self.options.enable_search:
54 self.requires("xapian-core/1.4.19")
55 self.requires("zlib/1.2.13")
56
57 def package_id(self):
58 del self.info.settings.compiler
59
60 def compatibility(self):
61 return [{"settings": [("build_type", "Release")]}]
62
63 def validate(self):
64 minimum_compiler_version = self._minimum_compiler_version.get(str(self.settings.compiler))
65 if minimum_compiler_version and Version(self.settings.compiler.version) < minimum_compiler_version:
66 raise ConanInvalidConfiguration(f"Compiler version too old. At least {minimum_compiler_version} is required.")
67 if Version(self.version) == "1.8.18":
68 check_min_vs(self, "191")
69
70 def build_requirements(self):
71 if self._settings_build.os == "Windows":
72 self.tool_requires("winflexbison/2.5.24")
73 else:
74 self.tool_requires("flex/2.6.4")
75 self.tool_requires("bison/3.8.2")
76
77 def source(self):
78 get(self, **self.conan_data["sources"][self.version], strip_root=True)
79
80 def generate(self):
81 tc = CMakeToolchain(self)
82 tc.variables["build_parse"] = self.options.enable_parse
83 tc.variables["build_search"] = self.options.enable_search
84 tc.variables["use_libc++"] = self.settings.compiler.get_safe("libcxx") == "libc++"
85 tc.variables["win_static"] = is_msvc_static_runtime(self)
86 tc.generate()
87
88 deps = CMakeDeps(self)
89 deps.generate()
90
91 def build(self):
92 apply_conandata_patches(self)
93 cmake = CMake(self)
94 cmake.configure()
95 cmake.build()
96
97 def package(self):
98 copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
99 cmake = CMake(self)
100 cmake.install()
101
102 def package_info(self):
103 self.cpp_info.set_property("cmake_find_mode", "none")
104 self.cpp_info.libdirs = []
105 self.cpp_info.includedirs = []
106
107 # TODO: to remove in conan v2
108 self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
109
[end of recipes/doxygen/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/doxygen/all/conanfile.py b/recipes/doxygen/all/conanfile.py
--- a/recipes/doxygen/all/conanfile.py
+++ b/recipes/doxygen/all/conanfile.py
@@ -21,10 +21,12 @@
options = {
"enable_parse": [True, False],
"enable_search": [True, False],
+ "enable_app": [True, False],
}
default_options = {
"enable_parse": True,
"enable_search": True,
+ "enable_app": False,
}
@property
@@ -52,10 +54,10 @@
def requirements(self):
if self.options.enable_search:
self.requires("xapian-core/1.4.19")
- self.requires("zlib/1.2.13")
-
- def package_id(self):
- del self.info.settings.compiler
+ self.requires("zlib/[>=1.2.11 <2]")
+ if self.options.enable_app or self.options.enable_parse:
+ # INFO: Doxygen uses upper case CMake variables to link/include IConv, so we are using patches for targets.
+ self.requires("libiconv/1.17")
def compatibility(self):
return [{"settings": [("build_type", "Release")]}]
@@ -81,6 +83,7 @@
tc = CMakeToolchain(self)
tc.variables["build_parse"] = self.options.enable_parse
tc.variables["build_search"] = self.options.enable_search
+ tc.variables["build_app"] = self.options.enable_app
tc.variables["use_libc++"] = self.settings.compiler.get_safe("libcxx") == "libc++"
tc.variables["win_static"] = is_msvc_static_runtime(self)
tc.generate()
@@ -103,6 +106,8 @@
self.cpp_info.set_property("cmake_find_mode", "none")
self.cpp_info.libdirs = []
self.cpp_info.includedirs = []
+ if self.settings.os in ["Linux", "FreeBSD"]:
+ self.cpp_info.system_libs = ["pthread", "m"]
# TODO: to remove in conan v2
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
|
{"golden_diff": "diff --git a/recipes/doxygen/all/conanfile.py b/recipes/doxygen/all/conanfile.py\n--- a/recipes/doxygen/all/conanfile.py\n+++ b/recipes/doxygen/all/conanfile.py\n@@ -21,10 +21,12 @@\n options = {\n \"enable_parse\": [True, False],\n \"enable_search\": [True, False],\n+ \"enable_app\": [True, False],\n }\n default_options = {\n \"enable_parse\": True,\n \"enable_search\": True,\n+ \"enable_app\": False,\n }\n \n @property\n@@ -52,10 +54,10 @@\n def requirements(self):\n if self.options.enable_search:\n self.requires(\"xapian-core/1.4.19\")\n- self.requires(\"zlib/1.2.13\")\n-\n- def package_id(self):\n- del self.info.settings.compiler\n+ self.requires(\"zlib/[>=1.2.11 <2]\")\n+ if self.options.enable_app or self.options.enable_parse:\n+ # INFO: Doxygen uses upper case CMake variables to link/include IConv, so we are using patches for targets.\n+ self.requires(\"libiconv/1.17\")\n \n def compatibility(self):\n return [{\"settings\": [(\"build_type\", \"Release\")]}]\n@@ -81,6 +83,7 @@\n tc = CMakeToolchain(self)\n tc.variables[\"build_parse\"] = self.options.enable_parse\n tc.variables[\"build_search\"] = self.options.enable_search\n+ tc.variables[\"build_app\"] = self.options.enable_app\n tc.variables[\"use_libc++\"] = self.settings.compiler.get_safe(\"libcxx\") == \"libc++\"\n tc.variables[\"win_static\"] = is_msvc_static_runtime(self)\n tc.generate()\n@@ -103,6 +106,8 @@\n self.cpp_info.set_property(\"cmake_find_mode\", \"none\")\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n+ if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n+ self.cpp_info.system_libs = [\"pthread\", \"m\"]\n \n # TODO: to remove in conan v2\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "issue": "[doxygen] Model iconv dependency as a conan package\nSpecify library name and version: **doxygen/1.9.2**\r\n\r\nThis resolves an issue where `iconv` wasn't being appropriately modelled as a conan dependency in the doxygen recipe. This lead to unresolved symbol errors on Macos builds with conan 2. 
This was previously part of https://github.com/conan-io/conan-center-index/pull/18415, but is being split to a separate PR to separate concerns and make review easier.\r\n\r\nCloses #19903 \r\n\r\n---\r\n\r\n- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).\r\n- [x] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).\r\n- [x] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get\nfrom conan.tools.microsoft import check_min_vs, is_msvc_static_runtime\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass DoxygenConan(ConanFile):\n name = \"doxygen\"\n description = \"A documentation system for C++, C, Java, IDL and PHP --- Note: Dot is disabled in this package\"\n topics = (\"installer\", \"devtool\", \"documentation\")\n homepage = \"https://github.com/doxygen/doxygen\"\n license = \"GPL-2.0-or-later\"\n url = \"https://github.com/conan-io/conan-center-index\"\n package_type = \"application\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"enable_parse\": [True, False],\n \"enable_search\": [True, False],\n }\n default_options = {\n \"enable_parse\": True,\n \"enable_search\": True,\n }\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n @property\n def _minimum_compiler_version(self):\n if Version(self.version) <= \"1.9.1\":\n return {\n \"gcc\": \"5\",\n }\n return {\n \"gcc\": \"7\", # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\",\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n if self.options.enable_search:\n self.requires(\"xapian-core/1.4.19\")\n self.requires(\"zlib/1.2.13\")\n\n def package_id(self):\n del self.info.settings.compiler\n\n def compatibility(self):\n return [{\"settings\": [(\"build_type\", \"Release\")]}]\n\n def validate(self):\n minimum_compiler_version = self._minimum_compiler_version.get(str(self.settings.compiler))\n if minimum_compiler_version and Version(self.settings.compiler.version) < minimum_compiler_version:\n raise ConanInvalidConfiguration(f\"Compiler version too old. 
At least {minimum_compiler_version} is required.\")\n if Version(self.version) == \"1.8.18\":\n check_min_vs(self, \"191\")\n\n def build_requirements(self):\n if self._settings_build.os == \"Windows\":\n self.tool_requires(\"winflexbison/2.5.24\")\n else:\n self.tool_requires(\"flex/2.6.4\")\n self.tool_requires(\"bison/3.8.2\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"build_parse\"] = self.options.enable_parse\n tc.variables[\"build_search\"] = self.options.enable_search\n tc.variables[\"use_libc++\"] = self.settings.compiler.get_safe(\"libcxx\") == \"libc++\"\n tc.variables[\"win_static\"] = is_msvc_static_runtime(self)\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_find_mode\", \"none\")\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n\n # TODO: to remove in conan v2\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/doxygen/all/conanfile.py"}]}
| 1,923 | 507 |
gh_patches_debug_2934
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-1433
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
USAVars Augmentation maps to 0
### Description
In the USAVars Datamodule, the default augmentation from NonGeoDatamodule is used. However, the dataset returns uint8 data, and it comes out of the augmentation still as uint8. This means you get an error when trying to train, and also that your input images end up as all zeros.
### Steps to reproduce
```
dm = USAVarsDataModule(root="path/to/usa_vars", batch_size=16)
dm.setup("fit")
dl = dm.train_dataloader()
batch = next(iter(dl))
aug_batch = dm.aug(batch)
print(aug_batch["image"].max())
```
### Version
'0.5.0.dev0'
</issue>
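One way to address this kind of dtype mismatch is to cast the raster to a floating-point tensor as soon as it is read, before any transform runs. The following is only a rough sketch of that idea, not necessarily the change that was adopted:
```
import rasterio
import torch
from torch import Tensor

def load_image(path: str) -> Tensor:
    """Read a GeoTIFF and return float32 so downstream normalization and
    augmentation do not silently operate on uint8 values."""
    with rasterio.open(path) as f:
        array = f.read()  # numpy array, often uint8 for NAIP imagery
    return torch.from_numpy(array).float()
```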
<code>
[start of torchgeo/datasets/usavars.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """USAVars dataset."""
5
6 import glob
7 import os
8 from collections.abc import Sequence
9 from typing import Callable, Optional
10
11 import matplotlib.pyplot as plt
12 import numpy as np
13 import rasterio
14 import torch
15 from matplotlib.figure import Figure
16 from torch import Tensor
17
18 from .geo import NonGeoDataset
19 from .utils import download_url, extract_archive
20
21
22 class USAVars(NonGeoDataset):
23 """USAVars dataset.
24
25 The USAVars dataset is reproduction of the dataset used in the paper "`A
26 generalizable and accessible approach to machine learning with global satellite
27 imagery <https://doi.org/10.1038/s41467-021-24638-z>`_". Specifically, this dataset
28 includes 1 sq km. crops of NAIP imagery resampled to 4m/px cenetered on ~100k points
29 that are sampled randomly from the contiguous states in the USA. Each point contains
30 three continuous valued labels (taken from the dataset released in the paper): tree
31 cover percentage, elevation, and population density.
32
33 Dataset format:
34
35 * images are 4-channel GeoTIFFs
36 * labels are singular float values
37
38 Dataset labels:
39
40 * tree cover
41 * elevation
42 * population density
43
44 If you use this dataset in your research, please cite the following paper:
45
46 * https://doi.org/10.1038/s41467-021-24638-z
47
48 .. versionadded:: 0.3
49 """
50
51 url_prefix = (
52 "https://files.codeocean.com/files/verified/"
53 + "fa908bbc-11f9-4421-8bd3-72a4bf00427f_v2.0/data/int/applications"
54 )
55 pop_csv_suffix = "CONTUS_16_640_POP_100000_0.csv?download"
56 uar_csv_suffix = "CONTUS_16_640_UAR_100000_0.csv?download"
57
58 data_url = "https://mosaiks.blob.core.windows.net/datasets/uar.zip"
59 dirname = "uar"
60
61 md5 = "677e89fd20e5dd0fe4d29b61827c2456"
62
63 label_urls = {
64 "housing": f"{url_prefix}/housing/outcomes_sampled_housing_{pop_csv_suffix}",
65 "income": f"{url_prefix}/income/outcomes_sampled_income_{pop_csv_suffix}",
66 "roads": f"{url_prefix}/roads/outcomes_sampled_roads_{pop_csv_suffix}",
67 "nightlights": f"{url_prefix}/nightlights/"
68 + f"outcomes_sampled_nightlights_{pop_csv_suffix}",
69 "population": f"{url_prefix}/population/"
70 + f"outcomes_sampled_population_{uar_csv_suffix}",
71 "elevation": f"{url_prefix}/elevation/"
72 + f"outcomes_sampled_elevation_{uar_csv_suffix}",
73 "treecover": f"{url_prefix}/treecover/"
74 + f"outcomes_sampled_treecover_{uar_csv_suffix}",
75 }
76
77 split_metadata = {
78 "train": {
79 "url": "https://mosaiks.blob.core.windows.net/datasets/train_split.txt",
80 "filename": "train_split.txt",
81 "md5": "3f58fffbf5fe177611112550297200e7",
82 },
83 "val": {
84 "url": "https://mosaiks.blob.core.windows.net/datasets/val_split.txt",
85 "filename": "val_split.txt",
86 "md5": "bca7183b132b919dec0fc24fb11662a0",
87 },
88 "test": {
89 "url": "https://mosaiks.blob.core.windows.net/datasets/test_split.txt",
90 "filename": "test_split.txt",
91 "md5": "97bb36bc003ae0bf556a8d6e8f77141a",
92 },
93 }
94
95 ALL_LABELS = ["treecover", "elevation", "population"]
96
97 def __init__(
98 self,
99 root: str = "data",
100 split: str = "train",
101 labels: Sequence[str] = ALL_LABELS,
102 transforms: Optional[Callable[[dict[str, Tensor]], dict[str, Tensor]]] = None,
103 download: bool = False,
104 checksum: bool = False,
105 ) -> None:
106 """Initialize a new USAVars dataset instance.
107
108 Args:
109 root: root directory where dataset can be found
110 split: train/val/test split to load
111 labels: list of labels to include
112 transforms: a function/transform that takes input sample and its target as
113 entry and returns a transformed version
114 download: if True, download dataset and store it in the root directory
115 checksum: if True, check the MD5 of the downloaded files (may be slow)
116
117 Raises:
118 AssertionError: if invalid labels are provided
119 ImportError: if pandas is not installed
120 RuntimeError: if ``download=False`` and data is not found, or checksums
121 don't match
122 """
123 self.root = root
124
125 assert split in self.split_metadata
126 self.split = split
127
128 for lab in labels:
129 assert lab in self.ALL_LABELS
130
131 self.labels = labels
132 self.transforms = transforms
133 self.download = download
134 self.checksum = checksum
135
136 self._verify()
137
138 try:
139 import pandas as pd # noqa: F401
140 except ImportError:
141 raise ImportError(
142 "pandas is not installed and is required to use this dataset"
143 )
144
145 self.files = self._load_files()
146
147 self.label_dfs = {
148 lab: pd.read_csv(os.path.join(self.root, lab + ".csv"), index_col="ID")
149 for lab in self.labels
150 }
151
152 def __getitem__(self, index: int) -> dict[str, Tensor]:
153 """Return an index within the dataset.
154
155 Args:
156 index: index to return
157
158 Returns:
159 data and label at that index
160 """
161 tif_file = self.files[index]
162 id_ = tif_file[5:-4]
163
164 sample = {
165 "labels": Tensor(
166 [self.label_dfs[lab].loc[id_][lab] for lab in self.labels]
167 ),
168 "image": self._load_image(os.path.join(self.root, "uar", tif_file)),
169 "centroid_lat": Tensor([self.label_dfs[self.labels[0]].loc[id_]["lat"]]),
170 "centroid_lon": Tensor([self.label_dfs[self.labels[0]].loc[id_]["lon"]]),
171 }
172
173 if self.transforms is not None:
174 sample = self.transforms(sample)
175
176 return sample
177
178 def __len__(self) -> int:
179 """Return the number of data points in the dataset.
180
181 Returns:
182 length of the dataset
183 """
184 return len(self.files)
185
186 def _load_files(self) -> list[str]:
187 """Loads file names."""
188 with open(os.path.join(self.root, f"{self.split}_split.txt")) as f:
189 files = f.read().splitlines()
190 return files
191
192 def _load_image(self, path: str) -> Tensor:
193 """Load a single image.
194
195 Args:
196 path: path to the image
197
198 Returns:
199 the image
200 """
201 with rasterio.open(path) as f:
202 array: "np.typing.NDArray[np.int_]" = f.read()
203 tensor = torch.from_numpy(array)
204 return tensor
205
206 def _verify(self) -> None:
207 """Verify the integrity of the dataset.
208
209 Raises:
210 RuntimeError: if ``download=False`` but dataset is missing or checksum fails
211 """
212 # Check if the extracted files already exist
213 pathname = os.path.join(self.root, "uar")
214 csv_pathname = os.path.join(self.root, "*.csv")
215 split_pathname = os.path.join(self.root, "*_split.txt")
216
217 csv_split_count = (len(glob.glob(csv_pathname)), len(glob.glob(split_pathname)))
218 if glob.glob(pathname) and csv_split_count == (7, 3):
219 return
220
221 # Check if the zip files have already been downloaded
222 pathname = os.path.join(self.root, self.dirname + ".zip")
223 if glob.glob(pathname) and csv_split_count == (7, 3):
224 self._extract()
225 return
226
227 # Check if the user requested to download the dataset
228 if not self.download:
229 raise RuntimeError(
230 f"Dataset not found in `root={self.root}` and `download=False`, "
231 "either specify a different `root` directory or use `download=True` "
232 "to automatically download the dataset."
233 )
234
235 self._download()
236 self._extract()
237
238 def _download(self) -> None:
239 """Download the dataset."""
240 for f_name in self.label_urls:
241 download_url(self.label_urls[f_name], self.root, filename=f_name + ".csv")
242
243 download_url(self.data_url, self.root, md5=self.md5 if self.checksum else None)
244
245 for metadata in self.split_metadata.values():
246 download_url(
247 metadata["url"],
248 self.root,
249 md5=metadata["md5"] if self.checksum else None,
250 )
251
252 def _extract(self) -> None:
253 """Extract the dataset."""
254 extract_archive(os.path.join(self.root, self.dirname + ".zip"))
255
256 def plot(
257 self,
258 sample: dict[str, Tensor],
259 show_labels: bool = True,
260 suptitle: Optional[str] = None,
261 ) -> Figure:
262 """Plot a sample from the dataset.
263
264 Args:
265 sample: a sample returned by :meth:`__getitem__`
266 show_labels: flag indicating whether to show labels above panel
267 suptitle: optional string to use as a suptitle
268
269 Returns:
270 a matplotlib Figure with the rendered sample
271 """
272 image = sample["image"][:3].numpy() # get RGB inds
273 image = np.moveaxis(image, 0, 2)
274
275 fig, axs = plt.subplots(figsize=(10, 10))
276 axs.imshow(image)
277 axs.axis("off")
278
279 if show_labels:
280 labels = [(lab, val) for lab, val in sample.items() if lab != "image"]
281 label_string = ""
282 for lab, val in labels:
283 label_string += f"{lab}={round(val[0].item(), 2)} "
284 axs.set_title(label_string)
285
286 if suptitle is not None:
287 plt.suptitle(suptitle)
288
289 return fig
290
[end of torchgeo/datasets/usavars.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchgeo/datasets/usavars.py b/torchgeo/datasets/usavars.py
--- a/torchgeo/datasets/usavars.py
+++ b/torchgeo/datasets/usavars.py
@@ -200,7 +200,7 @@
"""
with rasterio.open(path) as f:
array: "np.typing.NDArray[np.int_]" = f.read()
- tensor = torch.from_numpy(array)
+ tensor = torch.from_numpy(array).float()
return tensor
def _verify(self) -> None:
|
{"golden_diff": "diff --git a/torchgeo/datasets/usavars.py b/torchgeo/datasets/usavars.py\n--- a/torchgeo/datasets/usavars.py\n+++ b/torchgeo/datasets/usavars.py\n@@ -200,7 +200,7 @@\n \"\"\"\n with rasterio.open(path) as f:\n array: \"np.typing.NDArray[np.int_]\" = f.read()\n- tensor = torch.from_numpy(array)\n+ tensor = torch.from_numpy(array).float()\n return tensor\n \n def _verify(self) -> None:\n", "issue": "USAVars Augmentation maps to 0\n### Description\r\n\r\nIn the USAVars Datamodule, the default augmentation from NonGeoDatamodule is used. However, the dataset returns uint8 data, and it comes out of the augmentation still as uint8. This means you get an error when trying to train but also that your input images are just all zeros.\r\n\r\n### Steps to reproduce\r\n```\r\ndm = USAVarsDataModule(root=\"path/to/usa_vars\", batch_size=16)\r\ndm.setup(\"fit\")\r\ndl = dm.train_dataloader()\r\nbatch = next(iter(dl))\r\naug_batch = dm.aug(batch)\r\nprint(aug_batch[\"image\"].max())\r\n```\r\n\r\n### Version\r\n\r\n'0.5.0.dev0'\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"USAVars dataset.\"\"\"\n\nimport glob\nimport os\nfrom collections.abc import Sequence\nfrom typing import Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rasterio\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import download_url, extract_archive\n\n\nclass USAVars(NonGeoDataset):\n \"\"\"USAVars dataset.\n\n The USAVars dataset is reproduction of the dataset used in the paper \"`A\n generalizable and accessible approach to machine learning with global satellite\n imagery <https://doi.org/10.1038/s41467-021-24638-z>`_\". Specifically, this dataset\n includes 1 sq km. crops of NAIP imagery resampled to 4m/px cenetered on ~100k points\n that are sampled randomly from the contiguous states in the USA. Each point contains\n three continuous valued labels (taken from the dataset released in the paper): tree\n cover percentage, elevation, and population density.\n\n Dataset format:\n\n * images are 4-channel GeoTIFFs\n * labels are singular float values\n\n Dataset labels:\n\n * tree cover\n * elevation\n * population density\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://doi.org/10.1038/s41467-021-24638-z\n\n .. 
versionadded:: 0.3\n \"\"\"\n\n url_prefix = (\n \"https://files.codeocean.com/files/verified/\"\n + \"fa908bbc-11f9-4421-8bd3-72a4bf00427f_v2.0/data/int/applications\"\n )\n pop_csv_suffix = \"CONTUS_16_640_POP_100000_0.csv?download\"\n uar_csv_suffix = \"CONTUS_16_640_UAR_100000_0.csv?download\"\n\n data_url = \"https://mosaiks.blob.core.windows.net/datasets/uar.zip\"\n dirname = \"uar\"\n\n md5 = \"677e89fd20e5dd0fe4d29b61827c2456\"\n\n label_urls = {\n \"housing\": f\"{url_prefix}/housing/outcomes_sampled_housing_{pop_csv_suffix}\",\n \"income\": f\"{url_prefix}/income/outcomes_sampled_income_{pop_csv_suffix}\",\n \"roads\": f\"{url_prefix}/roads/outcomes_sampled_roads_{pop_csv_suffix}\",\n \"nightlights\": f\"{url_prefix}/nightlights/\"\n + f\"outcomes_sampled_nightlights_{pop_csv_suffix}\",\n \"population\": f\"{url_prefix}/population/\"\n + f\"outcomes_sampled_population_{uar_csv_suffix}\",\n \"elevation\": f\"{url_prefix}/elevation/\"\n + f\"outcomes_sampled_elevation_{uar_csv_suffix}\",\n \"treecover\": f\"{url_prefix}/treecover/\"\n + f\"outcomes_sampled_treecover_{uar_csv_suffix}\",\n }\n\n split_metadata = {\n \"train\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/train_split.txt\",\n \"filename\": \"train_split.txt\",\n \"md5\": \"3f58fffbf5fe177611112550297200e7\",\n },\n \"val\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/val_split.txt\",\n \"filename\": \"val_split.txt\",\n \"md5\": \"bca7183b132b919dec0fc24fb11662a0\",\n },\n \"test\": {\n \"url\": \"https://mosaiks.blob.core.windows.net/datasets/test_split.txt\",\n \"filename\": \"test_split.txt\",\n \"md5\": \"97bb36bc003ae0bf556a8d6e8f77141a\",\n },\n }\n\n ALL_LABELS = [\"treecover\", \"elevation\", \"population\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n labels: Sequence[str] = ALL_LABELS,\n transforms: Optional[Callable[[dict[str, Tensor]], dict[str, Tensor]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new USAVars dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: train/val/test split to load\n labels: list of labels to include\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if invalid labels are provided\n ImportError: if pandas is not installed\n RuntimeError: if ``download=False`` and data is not found, or checksums\n don't match\n \"\"\"\n self.root = root\n\n assert split in self.split_metadata\n self.split = split\n\n for lab in labels:\n assert lab in self.ALL_LABELS\n\n self.labels = labels\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n\n try:\n import pandas as pd # noqa: F401\n except ImportError:\n raise ImportError(\n \"pandas is not installed and is required to use this dataset\"\n )\n\n self.files = self._load_files()\n\n self.label_dfs = {\n lab: pd.read_csv(os.path.join(self.root, lab + \".csv\"), index_col=\"ID\")\n for lab in self.labels\n }\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n tif_file = self.files[index]\n id_ = tif_file[5:-4]\n\n sample = {\n \"labels\": Tensor(\n 
[self.label_dfs[lab].loc[id_][lab] for lab in self.labels]\n ),\n \"image\": self._load_image(os.path.join(self.root, \"uar\", tif_file)),\n \"centroid_lat\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lat\"]]),\n \"centroid_lon\": Tensor([self.label_dfs[self.labels[0]].loc[id_][\"lon\"]]),\n }\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.files)\n\n def _load_files(self) -> list[str]:\n \"\"\"Loads file names.\"\"\"\n with open(os.path.join(self.root, f\"{self.split}_split.txt\")) as f:\n files = f.read().splitlines()\n return files\n\n def _load_image(self, path: str) -> Tensor:\n \"\"\"Load a single image.\n\n Args:\n path: path to the image\n\n Returns:\n the image\n \"\"\"\n with rasterio.open(path) as f:\n array: \"np.typing.NDArray[np.int_]\" = f.read()\n tensor = torch.from_numpy(array)\n return tensor\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, \"uar\")\n csv_pathname = os.path.join(self.root, \"*.csv\")\n split_pathname = os.path.join(self.root, \"*_split.txt\")\n\n csv_split_count = (len(glob.glob(csv_pathname)), len(glob.glob(split_pathname)))\n if glob.glob(pathname) and csv_split_count == (7, 3):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dirname + \".zip\")\n if glob.glob(pathname) and csv_split_count == (7, 3):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n for f_name in self.label_urls:\n download_url(self.label_urls[f_name], self.root, filename=f_name + \".csv\")\n\n download_url(self.data_url, self.root, md5=self.md5 if self.checksum else None)\n\n for metadata in self.split_metadata.values():\n download_url(\n metadata[\"url\"],\n self.root,\n md5=metadata[\"md5\"] if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n extract_archive(os.path.join(self.root, self.dirname + \".zip\"))\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_labels: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`__getitem__`\n show_labels: flag indicating whether to show labels above panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"][:3].numpy() # get RGB inds\n image = np.moveaxis(image, 0, 2)\n\n fig, axs = plt.subplots(figsize=(10, 10))\n axs.imshow(image)\n axs.axis(\"off\")\n\n if show_labels:\n labels = [(lab, val) for lab, val in sample.items() if lab != \"image\"]\n label_string = \"\"\n for lab, val in labels:\n label_string += f\"{lab}={round(val[0].item(), 2)} \"\n axs.set_title(label_string)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": 
"torchgeo/datasets/usavars.py"}]}
| 3,866 | 125 |
gh_patches_debug_310
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-7454
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A header with Japanese text has no anchor link.
### Summary
I found that a header with Japanese text has no anchor link.
### Steps to reproduce
Code snippet:
```
import streamlit as st
st.header("セクション")
```
1. Run the code snippet above.
2. Check whether the header has an anchor link.
**Expected behavior:**
The header ("セクション") has an anchor link.
**Actual behavior:**
The header ("セクション") has no anchor link.
### Is this a regression?
No
### Debug info
- Streamlit version: Streamlit, version 1.10.0
- Python version: Python 3.8.10
- Using Conda
- OS version: Ubuntu 20.04.4 LTS
- Browser version: Chrome / Version 104.0.5112.101 (Official Build) (x86_64)
### Additional information
A header with Korean text or Chinese text also has no anchor link.
</issue>
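The symptom is consistent with an anchor-slug routine that drops every non-ASCII character, which leaves an empty string for CJK-only headings. The snippet below only illustrates that failure mode and a Unicode-aware alternative; it is not Streamlit's actual anchor-generation code:
```
import re

def ascii_slug(text: str) -> str:
    # Keeps only ASCII letters/digits, so CJK-only text collapses to ""
    return re.sub(r"[^a-z0-9 _-]", "", text.lower()).strip().replace(" ", "-")

def unicode_slug(text: str) -> str:
    # \w matches word characters from any script in Python 3
    return re.sub(r"[^\w _-]", "", text.lower()).strip().replace(" ", "-")

print(repr(ascii_slug("セクション")))    # '' -> no usable anchor
print(repr(unicode_slug("セクション")))  # 'セクション'
```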
<code>
[start of e2e/scripts/st_title.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.title("This title is awesome!")
18 st.title("This title is awesome too!", anchor="awesome-title")
19
[end of e2e/scripts/st_title.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/e2e/scripts/st_title.py b/e2e/scripts/st_title.py
--- a/e2e/scripts/st_title.py
+++ b/e2e/scripts/st_title.py
@@ -16,3 +16,6 @@
st.title("This title is awesome!")
st.title("This title is awesome too!", anchor="awesome-title")
+
+st.title("日本語タイトル")
+st.title("その他の邦題", anchor="アンカー")
|
{"golden_diff": "diff --git a/e2e/scripts/st_title.py b/e2e/scripts/st_title.py\n--- a/e2e/scripts/st_title.py\n+++ b/e2e/scripts/st_title.py\n@@ -16,3 +16,6 @@\n \n st.title(\"This title is awesome!\")\n st.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n+\n+st.title(\"\u65e5\u672c\u8a9e\u30bf\u30a4\u30c8\u30eb\")\n+st.title(\"\u305d\u306e\u4ed6\u306e\u90a6\u984c\", anchor=\"\u30a2\u30f3\u30ab\u30fc\")\n", "issue": "A header with Japanese text has no anchor link.\n### Summary\r\n\r\nI found that a header with Japanese text has no anchor link.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nimport streamlit as st\r\nst.header(\"\u30bb\u30af\u30b7\u30e7\u30f3\")\r\n```\r\n\r\n1. Run code snippet above.\r\n2. Check if the header has anchor link or not.\r\n\r\n**Expected behavior:**\r\n\r\nThe header (\"\u30bb\u30af\u30b7\u30e7\u30f3\") has anchor link.\r\n\r\n**Actual behavior:**\r\n\r\nThe header (\"\u30bb\u30af\u30b7\u30e7\u30f3\") has no anchor link.\r\n\r\n### Is this a regression?\r\n\r\nNo\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit, version 1.10.0\r\n- Python version: Python 3.8.10\r\n- Using Conda\r\n- OS version: Ubuntu 20.04.4 LTS\r\n- Browser version: Chrome / Version 104.0.5112.101 (Official Build) (x86_64)\r\n\r\n### Additional information\r\n\r\nA header with Korean text or Chinese text also has no anchor link.\r\n\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.title(\"This title is awesome!\")\nst.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n", "path": "e2e/scripts/st_title.py"}]}
| 968 | 98 |
gh_patches_debug_41324
|
rasdani/github-patches
|
git_diff
|
tough-dev-school__education-backend-885
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin: show only enabled promo codes by default
We have accumulated a huge pile of promo codes, and it is now hard to find anything when you first open the admin. The admin should show only enabled promo codes by default.
</issue>
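A common way to get this behaviour in the Django admin is a custom `SimpleListFilter` whose `queryset()` falls back to the active-only branch when no filter value has been chosen. The sketch below is only an illustration of that pattern; the class and parameter names are made up:
```
from django.contrib import admin
from django.utils.translation import gettext_lazy as _


class ActiveByDefaultFilter(admin.SimpleListFilter):
    """Show only active promo codes unless the user explicitly picks 'No'."""

    title = _('Active')
    parameter_name = 'is_active'

    def lookups(self, request, model_admin):
        return (('t', _('Yes')), ('f', _('No')))

    def queryset(self, request, queryset):
        if self.value() == 'f':
            return queryset.filter(active=False)
        # No value selected (the default) behaves like "Yes"
        return queryset.filter(active=True)
```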
<code>
[start of src/app/admin/filters.py]
1 from django.contrib import admin
2 from django.utils.translation import gettext_lazy as _
3
4
5 class BooleanFilter(admin.SimpleListFilter):
6 """
7 Abstract base class for simple boolean filter in admin. You should define only
8 `title`, unique `parameter_name` and two methods: `t` and `f`, returning a queryset
9 when filter is set to True and False respectively:
10 class HasClassesFilter(BooleanFilter):
11 title = _('Has classes')
12 parameter_name = 'has_classes'
13 def t(self, request, queryset):
14 return queryset.filter(classes__isnull=False).distinct('pk')
15 def n(self, request, queryset):
16 return queryset.filter(classes__isnull=True)
17 """
18 def lookups(self, request, model_admin):
19 return (
20 ('t', _('Yes')),
21 ('f', _('No')),
22 )
23
24 def queryset(self, request, queryset):
25 if not self.value():
26 return queryset
27 else:
28 if self.value() == 't':
29 return self.t(request, queryset)
30 else:
31 return self.f(request, queryset)
32
[end of src/app/admin/filters.py]
[start of src/orders/admin/promocodes/admin.py]
1 from django.urls import reverse
2 from django.utils.safestring import mark_safe
3 from django.utils.translation import gettext_lazy as _
4
5 from app.admin import ModelAdmin, admin
6 from orders.admin.promocodes import actions
7 from orders.models import PromoCode
8
9
10 @admin.register(PromoCode)
11 class PromoCodeAdmin(ModelAdmin):
12 list_display = [
13 'id',
14 'name',
15 'discount_percent',
16 'discount_value',
17 'order_count',
18 'comment',
19 'active',
20 ]
21
22 list_editable = [
23 'active',
24 ]
25
26 list_filter = [
27 'active',
28 ]
29
30 actions = [actions.deactivate]
31
32 def get_queryset(self, request):
33 return super().get_queryset(request) \
34 .with_order_count()
35
36 @mark_safe
37 @admin.display(description=_('Order count'), ordering='order_count')
38 def order_count(self, obj=None):
39 if hasattr(obj, 'order_count') and obj.order_count:
40 orders_url = reverse('admin:orders_order_changelist')
41 return f'<a href="{orders_url}?is_paid=t&promocode_id={obj.id}">{obj.order_count}</a>'
42
43 return '—'
44
[end of src/orders/admin/promocodes/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/app/admin/filters.py b/src/app/admin/filters.py
--- a/src/app/admin/filters.py
+++ b/src/app/admin/filters.py
@@ -12,7 +12,7 @@
parameter_name = 'has_classes'
def t(self, request, queryset):
return queryset.filter(classes__isnull=False).distinct('pk')
- def n(self, request, queryset):
+ def f(self, request, queryset):
return queryset.filter(classes__isnull=True)
"""
def lookups(self, request, model_admin):
@@ -24,8 +24,22 @@
def queryset(self, request, queryset):
if not self.value():
return queryset
- else:
- if self.value() == 't':
- return self.t(request, queryset)
- else:
- return self.f(request, queryset)
+
+ if self.value() == 't':
+ return self.t(request, queryset)
+
+ return self.f(request, queryset)
+
+
+class DefaultTrueBooleanFilter(BooleanFilter):
+ def queryset(self, request, queryset):
+ if not self.value() or self.value() == 't':
+ return self.t(request, queryset)
+
+ return self.f(request, queryset)
+
+
+__all__ = [
+ 'BooleanFilter',
+ 'DefaultTrueBooleanFilter',
+]
diff --git a/src/orders/admin/promocodes/admin.py b/src/orders/admin/promocodes/admin.py
--- a/src/orders/admin/promocodes/admin.py
+++ b/src/orders/admin/promocodes/admin.py
@@ -1,31 +1,45 @@
-from django.urls import reverse
-from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from app.admin import ModelAdmin, admin
+from app.admin.filters import DefaultTrueBooleanFilter
from orders.admin.promocodes import actions
from orders.models import PromoCode
+class PromodeActiveFilter(DefaultTrueBooleanFilter):
+ title = _('Active')
+ parameter_name = 'is_active'
+
+ def t(self, request, queryset):
+ return queryset.filter(active=True)
+
+ def f(self, request, queryset):
+ return queryset.filter(active=False)
+
+
@admin.register(PromoCode)
class PromoCodeAdmin(ModelAdmin):
- list_display = [
+ list_display = (
'id',
'name',
- 'discount_percent',
- 'discount_value',
+ 'discount',
'order_count',
'comment',
'active',
- ]
+ )
list_editable = [
'active',
]
- list_filter = [
- 'active',
- ]
+ list_filter = (
+ PromodeActiveFilter,
+ )
+
+ list_display_links = (
+ 'id',
+ 'name',
+ )
actions = [actions.deactivate]
@@ -33,11 +47,19 @@
return super().get_queryset(request) \
.with_order_count()
- @mark_safe
@admin.display(description=_('Order count'), ordering='order_count')
- def order_count(self, obj=None):
+ def order_count(self, obj: PromoCode | None = None) -> str:
if hasattr(obj, 'order_count') and obj.order_count:
- orders_url = reverse('admin:orders_order_changelist')
- return f'<a href="{orders_url}?is_paid=t&promocode_id={obj.id}">{obj.order_count}</a>'
+ return str(obj.order_count)
return '—'
+
+ @admin.display(description=_('Discount'), ordering='discount_percent')
+ def discount(self, obj: PromoCode | None = None) -> str:
+ if not obj:
+ return '—'
+
+ if obj.discount_value is not None:
+ return f'{obj.discount_value} ₽'
+
+ return f'{obj.discount_percent} %'
|
{"golden_diff": "diff --git a/src/app/admin/filters.py b/src/app/admin/filters.py\n--- a/src/app/admin/filters.py\n+++ b/src/app/admin/filters.py\n@@ -12,7 +12,7 @@\n parameter_name = 'has_classes'\n def t(self, request, queryset):\n return queryset.filter(classes__isnull=False).distinct('pk')\n- def n(self, request, queryset):\n+ def f(self, request, queryset):\n return queryset.filter(classes__isnull=True)\n \"\"\"\n def lookups(self, request, model_admin):\n@@ -24,8 +24,22 @@\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n- else:\n- if self.value() == 't':\n- return self.t(request, queryset)\n- else:\n- return self.f(request, queryset)\n+\n+ if self.value() == 't':\n+ return self.t(request, queryset)\n+\n+ return self.f(request, queryset)\n+\n+\n+class DefaultTrueBooleanFilter(BooleanFilter):\n+ def queryset(self, request, queryset):\n+ if not self.value() or self.value() == 't':\n+ return self.t(request, queryset)\n+\n+ return self.f(request, queryset)\n+\n+\n+__all__ = [\n+ 'BooleanFilter',\n+ 'DefaultTrueBooleanFilter',\n+]\ndiff --git a/src/orders/admin/promocodes/admin.py b/src/orders/admin/promocodes/admin.py\n--- a/src/orders/admin/promocodes/admin.py\n+++ b/src/orders/admin/promocodes/admin.py\n@@ -1,31 +1,45 @@\n-from django.urls import reverse\n-from django.utils.safestring import mark_safe\n from django.utils.translation import gettext_lazy as _\n \n from app.admin import ModelAdmin, admin\n+from app.admin.filters import DefaultTrueBooleanFilter\n from orders.admin.promocodes import actions\n from orders.models import PromoCode\n \n \n+class PromodeActiveFilter(DefaultTrueBooleanFilter):\n+ title = _('Active')\n+ parameter_name = 'is_active'\n+\n+ def t(self, request, queryset):\n+ return queryset.filter(active=True)\n+\n+ def f(self, request, queryset):\n+ return queryset.filter(active=False)\n+\n+\n @admin.register(PromoCode)\n class PromoCodeAdmin(ModelAdmin):\n- list_display = [\n+ list_display = (\n 'id',\n 'name',\n- 'discount_percent',\n- 'discount_value',\n+ 'discount',\n 'order_count',\n 'comment',\n 'active',\n- ]\n+ )\n \n list_editable = [\n 'active',\n ]\n \n- list_filter = [\n- 'active',\n- ]\n+ list_filter = (\n+ PromodeActiveFilter,\n+ )\n+\n+ list_display_links = (\n+ 'id',\n+ 'name',\n+ )\n \n actions = [actions.deactivate]\n \n@@ -33,11 +47,19 @@\n return super().get_queryset(request) \\\n .with_order_count()\n \n- @mark_safe\n @admin.display(description=_('Order count'), ordering='order_count')\n- def order_count(self, obj=None):\n+ def order_count(self, obj: PromoCode | None = None) -> str:\n if hasattr(obj, 'order_count') and obj.order_count:\n- orders_url = reverse('admin:orders_order_changelist')\n- return f'<a href=\"{orders_url}?is_paid=t&promocode_id={obj.id}\">{obj.order_count}</a>'\n+ return str(obj.order_count)\n \n return '\u2014'\n+\n+ @admin.display(description=_('Discount'), ordering='discount_percent')\n+ def discount(self, obj: PromoCode | None = None) -> str:\n+ if not obj:\n+ return '\u2014'\n+\n+ if obj.discount_value is not None:\n+ return f'{obj.discount_value} \u20bd'\n+\n+ return f'{obj.discount_percent} %'\n", "issue": "\u0410\u0434\u043c\u0438\u043d\u043a\u0430: \u043f\u043e-\u0443\u043c\u043e\u043b\u0447\u0430\u043d\u0438\u044e \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u0432\u043a\u043b\u044e\u0447\u0435\u043d\u043d\u044b\u0435 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u044b\n\u0423 \u043d\u0430\u0441 
\u043d\u0430\u043a\u043e\u043f\u0438\u043b\u0430\u0441\u044c \u043e\u0433\u0440\u043e\u043c\u043d\u0430\u044f \u0433\u043e\u0440\u0430 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u043e\u0432, \u0438 \u0442\u0435\u043f\u0435\u0440\u044c \u043f\u0440\u0438 \u043f\u0435\u0440\u0432\u043e\u043c \u0432\u0445\u043e\u0434\u0435 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0441\u043b\u043e\u0436\u043d\u043e \u043e\u0440\u0438\u0435\u043d\u0442\u0438\u0440\u043e\u0432\u0430\u0442\u044c\u0441\u044f. \u041d\u0443\u0436\u043d\u043e \u0441\u0434\u0435\u043b\u0430\u0442\u044c, \u0447\u0442\u043e\u0431\u044b \u043f\u043e-\u0443\u043c\u043e\u043b\u0447\u0430\u043d\u0438\u044e \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435 \u043f\u043e\u043a\u0430\u0437\u044b\u0432\u0430\u043b\u0438\u0441\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u0432\u043a\u043b\u044e\u0447\u0435\u043d\u043d\u044b\u0435 \u043f\u0440\u043e\u043c\u043e\u043a\u043e\u0434\u044b.\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass BooleanFilter(admin.SimpleListFilter):\n \"\"\"\n Abstract base class for simple boolean filter in admin. You should define only\n `title`, unique `parameter_name` and two methods: `t` and `f`, returning a queryset\n when filter is set to True and False respectively:\n class HasClassesFilter(BooleanFilter):\n title = _('Has classes')\n parameter_name = 'has_classes'\n def t(self, request, queryset):\n return queryset.filter(classes__isnull=False).distinct('pk')\n def n(self, request, queryset):\n return queryset.filter(classes__isnull=True)\n \"\"\"\n def lookups(self, request, model_admin):\n return (\n ('t', _('Yes')),\n ('f', _('No')),\n )\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n else:\n if self.value() == 't':\n return self.t(request, queryset)\n else:\n return self.f(request, queryset)\n", "path": "src/app/admin/filters.py"}, {"content": "from django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom app.admin import ModelAdmin, admin\nfrom orders.admin.promocodes import actions\nfrom orders.models import PromoCode\n\n\[email protected](PromoCode)\nclass PromoCodeAdmin(ModelAdmin):\n list_display = [\n 'id',\n 'name',\n 'discount_percent',\n 'discount_value',\n 'order_count',\n 'comment',\n 'active',\n ]\n\n list_editable = [\n 'active',\n ]\n\n list_filter = [\n 'active',\n ]\n\n actions = [actions.deactivate]\n\n def get_queryset(self, request):\n return super().get_queryset(request) \\\n .with_order_count()\n\n @mark_safe\n @admin.display(description=_('Order count'), ordering='order_count')\n def order_count(self, obj=None):\n if hasattr(obj, 'order_count') and obj.order_count:\n orders_url = reverse('admin:orders_order_changelist')\n return f'<a href=\"{orders_url}?is_paid=t&promocode_id={obj.id}\">{obj.order_count}</a>'\n\n return '\u2014'\n", "path": "src/orders/admin/promocodes/admin.py"}]}
| 1,260 | 862 |
gh_patches_debug_56268
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-4910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Validate profile fields on form
Related code
https://github.com/rtfd/readthedocs.org/blob/164800694a25d769234c6e7019c483f347fe9226/readthedocs/core/forms.py#L20-L46
This will raise an exception if the submitted value is longer than the corresponding model field allows.
Sentry issue https://sentry.io/read-the-docs/readthedocs-org/issues/666774301/
</issue>
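The usual remedy is to give the extra form fields the same `max_length` as the underlying `User` columns, so over-long input fails form validation instead of raising when the model is saved. A minimal standalone sketch of that behaviour, assuming a limit of 30 characters (the exact value depends on the Django version and is an assumption here):
```
import django
from django.conf import settings

if not settings.configured:  # minimal setup so the form can be validated standalone
    settings.configure(USE_I18N=False)
    django.setup()

from django import forms


class NameForm(forms.Form):
    # Mirror the assumed model column limit so the form rejects long values
    first_name = forms.CharField(required=False, max_length=30)
    last_name = forms.CharField(required=False, max_length=30)


form = NameForm(data={"first_name": "x" * 31})
print(form.is_valid())  # False: the view can re-render the form with errors
```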
<code>
[start of readthedocs/core/forms.py]
1 # -*- coding: utf-8 -*-
2 """Forms for core app."""
3
4 from __future__ import (
5 absolute_import, division, print_function, unicode_literals)
6
7 import logging
8 from builtins import object
9
10 from django import forms
11 from django.contrib.auth.models import User
12 from django.forms.fields import CharField
13 from django.utils.translation import ugettext_lazy as _
14
15 from .models import UserProfile
16
17 log = logging.getLogger(__name__)
18
19
20 class UserProfileForm(forms.ModelForm):
21 first_name = CharField(label=_('First name'), required=False)
22 last_name = CharField(label=_('Last name'), required=False)
23
24 class Meta(object):
25 model = UserProfile
26 # Don't allow users edit someone else's user page
27 fields = ['first_name', 'last_name', 'homepage']
28
29 def __init__(self, *args, **kwargs):
30 super(UserProfileForm, self).__init__(*args, **kwargs)
31 try:
32 self.fields['first_name'].initial = self.instance.user.first_name
33 self.fields['last_name'].initial = self.instance.user.last_name
34 except AttributeError:
35 pass
36
37 def save(self, commit=True):
38 first_name = self.cleaned_data.pop('first_name', None)
39 last_name = self.cleaned_data.pop('last_name', None)
40 profile = super(UserProfileForm, self).save(commit=commit)
41 if commit:
42 user = profile.user
43 user.first_name = first_name
44 user.last_name = last_name
45 user.save()
46 return profile
47
48
49 class UserDeleteForm(forms.ModelForm):
50 username = CharField(
51 label=_('Username'),
52 help_text=_('Please type your username to confirm.'),
53 )
54
55 class Meta(object):
56 model = User
57 fields = ['username']
58
59 def clean_username(self):
60 data = self.cleaned_data['username']
61
62 if self.instance.username != data:
63 raise forms.ValidationError(_('Username does not match!'))
64
65 return data
66
67
68 class UserAdvertisingForm(forms.ModelForm):
69 class Meta(object):
70 model = UserProfile
71 fields = ['allow_ads']
72
73
74 class FacetField(forms.MultipleChoiceField):
75
76 """
77 For filtering searches on a facet.
78
79 Has validation for the format of facet values.
80 """
81
82 def valid_value(self, value):
83 """
84 Although this is a choice field, no choices need to be supplied.
85
86 Instead, we just validate that the value is in the correct format for
87 facet filtering (facet_name:value)
88 """
89 if ':' not in value:
90 return False
91 return True
92
[end of readthedocs/core/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/core/forms.py b/readthedocs/core/forms.py
--- a/readthedocs/core/forms.py
+++ b/readthedocs/core/forms.py
@@ -18,8 +18,8 @@
class UserProfileForm(forms.ModelForm):
- first_name = CharField(label=_('First name'), required=False)
- last_name = CharField(label=_('Last name'), required=False)
+ first_name = CharField(label=_('First name'), required=False, max_length=30)
+ last_name = CharField(label=_('Last name'), required=False, max_length=30)
class Meta(object):
model = UserProfile
|
{"golden_diff": "diff --git a/readthedocs/core/forms.py b/readthedocs/core/forms.py\n--- a/readthedocs/core/forms.py\n+++ b/readthedocs/core/forms.py\n@@ -18,8 +18,8 @@\n \n \n class UserProfileForm(forms.ModelForm):\n- first_name = CharField(label=_('First name'), required=False)\n- last_name = CharField(label=_('Last name'), required=False)\n+ first_name = CharField(label=_('First name'), required=False, max_length=30)\n+ last_name = CharField(label=_('Last name'), required=False, max_length=30)\n \n class Meta(object):\n model = UserProfile\n", "issue": "Validate profile fields on form\nRelated code\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/164800694a25d769234c6e7019c483f347fe9226/readthedocs/core/forms.py#L20-L46\r\n\r\nThis will raise an exception if the length is greater than the model\r\n\r\nSentry issue https://sentry.io/read-the-docs/readthedocs-org/issues/666774301/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Forms for core app.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\nfrom builtins import object\n\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.forms.fields import CharField\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import UserProfile\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n first_name = CharField(label=_('First name'), required=False)\n last_name = CharField(label=_('Last name'), required=False)\n\n class Meta(object):\n model = UserProfile\n # Don't allow users edit someone else's user page\n fields = ['first_name', 'last_name', 'homepage']\n\n def __init__(self, *args, **kwargs):\n super(UserProfileForm, self).__init__(*args, **kwargs)\n try:\n self.fields['first_name'].initial = self.instance.user.first_name\n self.fields['last_name'].initial = self.instance.user.last_name\n except AttributeError:\n pass\n\n def save(self, commit=True):\n first_name = self.cleaned_data.pop('first_name', None)\n last_name = self.cleaned_data.pop('last_name', None)\n profile = super(UserProfileForm, self).save(commit=commit)\n if commit:\n user = profile.user\n user.first_name = first_name\n user.last_name = last_name\n user.save()\n return profile\n\n\nclass UserDeleteForm(forms.ModelForm):\n username = CharField(\n label=_('Username'),\n help_text=_('Please type your username to confirm.'),\n )\n\n class Meta(object):\n model = User\n fields = ['username']\n\n def clean_username(self):\n data = self.cleaned_data['username']\n\n if self.instance.username != data:\n raise forms.ValidationError(_('Username does not match!'))\n\n return data\n\n\nclass UserAdvertisingForm(forms.ModelForm):\n class Meta(object):\n model = UserProfile\n fields = ['allow_ads']\n\n\nclass FacetField(forms.MultipleChoiceField):\n\n \"\"\"\n For filtering searches on a facet.\n\n Has validation for the format of facet values.\n \"\"\"\n\n def valid_value(self, value):\n \"\"\"\n Although this is a choice field, no choices need to be supplied.\n\n Instead, we just validate that the value is in the correct format for\n facet filtering (facet_name:value)\n \"\"\"\n if ':' not in value:\n return False\n return True\n", "path": "readthedocs/core/forms.py"}]}
| 1,369 | 138 |
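A minimal standalone sketch (not part of the Read the Docs codebase; the form name below is made up) of the behaviour the patch above relies on: adding `max_length` to a form `CharField` makes over-long input fail form validation instead of raising at the model/database layer. It assumes Django is installed and uses `settings.configure()` so it can run outside a project.

```python
# Illustrative only: max_length on a form CharField turns over-long input
# into a form validation error rather than a model-level exception.
import django
from django.conf import settings

settings.configure()  # bare-bones settings so the form machinery works standalone
django.setup()

from django import forms


class ProfileSketchForm(forms.Form):
    # 30 mirrors the max_length of Django's built-in User.first_name field
    first_name = forms.CharField(required=False, max_length=30)


form = ProfileSketchForm(data={"first_name": "x" * 31})
print(form.is_valid())  # False: input longer than max_length is rejected
print(form.errors)      # reports the max_length violation for first_name
```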
gh_patches_debug_34769
|
rasdani/github-patches
|
git_diff
|
napari__napari-3016
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Opacity slider label should be between 0 and 1
## 🐛 Bug
Opacity slider label should be between 0 and 1, not 0 and 100. This will remove the need for normalization on the slider: https://github.com/napari/napari/blob/aade148d8e5cb339bb2981ab4d1081ae5d2747e0/napari/_qt/layer_controls/qt_layer_controls_base.py#L79

</issue>
<code>
[start of napari/_qt/layer_controls/qt_layer_controls_base.py]
1 from qtpy.QtCore import Qt
2 from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout
3 from superqt import QLabeledSlider as QSlider
4
5 from ...layers.base._base_constants import BLENDING_TRANSLATIONS
6 from ...utils.events import disconnect_events
7
8
9 class QtLayerControls(QFrame):
10 """Superclass for all the other LayerControl classes.
11
12 This class is never directly instantiated anywhere.
13
14 Parameters
15 ----------
16 layer : napari.layers.Layer
17 An instance of a napari layer.
18
19 Attributes
20 ----------
21 blendComboBox : qtpy.QtWidgets.QComboBox
22 Drowpdown widget to select blending mode of layer.
23 grid_layout : qtpy.QtWidgets.QGridLayout
24 Layout of Qt widget controls for the layer.
25 layer : napari.layers.Layer
26 An instance of a napari layer.
27 opacitySlider : qtpy.QtWidgets.QSlider
28 Slider controlling opacity of the layer.
29 """
30
31 def __init__(self, layer):
32 super().__init__()
33
34 self.layer = layer
35 self.layer.events.blending.connect(self._on_blending_change)
36 self.layer.events.opacity.connect(self._on_opacity_change)
37
38 self.setAttribute(Qt.WA_DeleteOnClose)
39
40 self.setObjectName('layer')
41 self.setMouseTracking(True)
42
43 self.grid_layout = QGridLayout(self)
44 self.grid_layout.setContentsMargins(0, 0, 0, 0)
45 self.grid_layout.setSpacing(2)
46 self.grid_layout.setColumnMinimumWidth(0, 86)
47 self.grid_layout.setColumnStretch(1, 1)
48 self.setLayout(self.grid_layout)
49
50 sld = QSlider(Qt.Horizontal, parent=self)
51 sld.setFocusPolicy(Qt.NoFocus)
52 sld.setMinimum(0)
53 sld.setMaximum(100)
54 sld.setSingleStep(1)
55 sld.valueChanged.connect(self.changeOpacity)
56 self.opacitySlider = sld
57 self._on_opacity_change()
58
59 blend_comboBox = QComboBox(self)
60 for index, (data, text) in enumerate(BLENDING_TRANSLATIONS.items()):
61 data = data.value
62 blend_comboBox.addItem(text, data)
63 if data == self.layer.blending:
64 blend_comboBox.setCurrentIndex(index)
65
66 blend_comboBox.activated[str].connect(self.changeBlending)
67 self.blendComboBox = blend_comboBox
68
69 def changeOpacity(self, value):
70 """Change opacity value on the layer model.
71
72 Parameters
73 ----------
74 value : float
75 Opacity value for shapes.
76 Input range 0 - 100 (transparent to fully opaque).
77 """
78 with self.layer.events.blocker(self._on_opacity_change):
79 self.layer.opacity = value / 100
80
81 def changeBlending(self, text):
82 """Change blending mode on the layer model.
83
84 Parameters
85 ----------
86 text : str
87 Name of blending mode, eg: 'translucent', 'additive', 'opaque'.
88 """
89 self.layer.blending = self.blendComboBox.currentData()
90
91 def _on_opacity_change(self, event=None):
92 """Receive layer model opacity change event and update opacity slider.
93
94 Parameters
95 ----------
96 event : napari.utils.event.Event, optional
97 The napari event that triggered this method, by default None.
98 """
99 with self.layer.events.opacity.blocker():
100 self.opacitySlider.setValue(int(self.layer.opacity * 100))
101
102 def _on_blending_change(self, event=None):
103 """Receive layer model blending mode change event and update slider.
104
105 Parameters
106 ----------
107 event : napari.utils.event.Event, optional
108 The napari event that triggered this method, by default None.
109 """
110 with self.layer.events.blending.blocker():
111 self.blendComboBox.setCurrentIndex(
112 self.blendComboBox.findData(self.layer.blending)
113 )
114
115 def close(self):
116 """Disconnect events when widget is closing."""
117 disconnect_events(self.layer.events, self)
118 for child in self.children():
119 close_method = getattr(child, 'close', None)
120 if close_method is not None:
121 close_method()
122 super().close()
123
[end of napari/_qt/layer_controls/qt_layer_controls_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/_qt/layer_controls/qt_layer_controls_base.py b/napari/_qt/layer_controls/qt_layer_controls_base.py
--- a/napari/_qt/layer_controls/qt_layer_controls_base.py
+++ b/napari/_qt/layer_controls/qt_layer_controls_base.py
@@ -1,6 +1,6 @@
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout
-from superqt import QLabeledSlider as QSlider
+from superqt import QLabeledDoubleSlider
from ...layers.base._base_constants import BLENDING_TRANSLATIONS
from ...utils.events import disconnect_events
@@ -47,11 +47,11 @@
self.grid_layout.setColumnStretch(1, 1)
self.setLayout(self.grid_layout)
- sld = QSlider(Qt.Horizontal, parent=self)
+ sld = QLabeledDoubleSlider(Qt.Horizontal, parent=self)
sld.setFocusPolicy(Qt.NoFocus)
sld.setMinimum(0)
- sld.setMaximum(100)
- sld.setSingleStep(1)
+ sld.setMaximum(1)
+ sld.setSingleStep(0.01)
sld.valueChanged.connect(self.changeOpacity)
self.opacitySlider = sld
self._on_opacity_change()
@@ -76,7 +76,7 @@
Input range 0 - 100 (transparent to fully opaque).
"""
with self.layer.events.blocker(self._on_opacity_change):
- self.layer.opacity = value / 100
+ self.layer.opacity = value
def changeBlending(self, text):
"""Change blending mode on the layer model.
@@ -97,7 +97,7 @@
The napari event that triggered this method, by default None.
"""
with self.layer.events.opacity.blocker():
- self.opacitySlider.setValue(int(self.layer.opacity * 100))
+ self.opacitySlider.setValue(self.layer.opacity)
def _on_blending_change(self, event=None):
"""Receive layer model blending mode change event and update slider.
|
{"golden_diff": "diff --git a/napari/_qt/layer_controls/qt_layer_controls_base.py b/napari/_qt/layer_controls/qt_layer_controls_base.py\n--- a/napari/_qt/layer_controls/qt_layer_controls_base.py\n+++ b/napari/_qt/layer_controls/qt_layer_controls_base.py\n@@ -1,6 +1,6 @@\n from qtpy.QtCore import Qt\n from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout\n-from superqt import QLabeledSlider as QSlider\n+from superqt import QLabeledDoubleSlider\n \n from ...layers.base._base_constants import BLENDING_TRANSLATIONS\n from ...utils.events import disconnect_events\n@@ -47,11 +47,11 @@\n self.grid_layout.setColumnStretch(1, 1)\n self.setLayout(self.grid_layout)\n \n- sld = QSlider(Qt.Horizontal, parent=self)\n+ sld = QLabeledDoubleSlider(Qt.Horizontal, parent=self)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.setMinimum(0)\n- sld.setMaximum(100)\n- sld.setSingleStep(1)\n+ sld.setMaximum(1)\n+ sld.setSingleStep(0.01)\n sld.valueChanged.connect(self.changeOpacity)\n self.opacitySlider = sld\n self._on_opacity_change()\n@@ -76,7 +76,7 @@\n Input range 0 - 100 (transparent to fully opaque).\n \"\"\"\n with self.layer.events.blocker(self._on_opacity_change):\n- self.layer.opacity = value / 100\n+ self.layer.opacity = value\n \n def changeBlending(self, text):\n \"\"\"Change blending mode on the layer model.\n@@ -97,7 +97,7 @@\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.opacity.blocker():\n- self.opacitySlider.setValue(int(self.layer.opacity * 100))\n+ self.opacitySlider.setValue(self.layer.opacity)\n \n def _on_blending_change(self, event=None):\n \"\"\"Receive layer model blending mode change event and update slider.\n", "issue": "Opacity slider label should be between 0 and 1\n## \ud83d\udc1b Bug\r\n\r\nOpacity slider label should be between 0 and 1 not 0 and 100. 
This will remove need for normalization on slider https://github.com/napari/napari/blob/aade148d8e5cb339bb2981ab4d1081ae5d2747e0/napari/_qt/layer_controls/qt_layer_controls_base.py#L79\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QComboBox, QFrame, QGridLayout\nfrom superqt import QLabeledSlider as QSlider\n\nfrom ...layers.base._base_constants import BLENDING_TRANSLATIONS\nfrom ...utils.events import disconnect_events\n\n\nclass QtLayerControls(QFrame):\n \"\"\"Superclass for all the other LayerControl classes.\n\n This class is never directly instantiated anywhere.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n An instance of a napari layer.\n\n Attributes\n ----------\n blendComboBox : qtpy.QtWidgets.QComboBox\n Drowpdown widget to select blending mode of layer.\n grid_layout : qtpy.QtWidgets.QGridLayout\n Layout of Qt widget controls for the layer.\n layer : napari.layers.Layer\n An instance of a napari layer.\n opacitySlider : qtpy.QtWidgets.QSlider\n Slider controlling opacity of the layer.\n \"\"\"\n\n def __init__(self, layer):\n super().__init__()\n\n self.layer = layer\n self.layer.events.blending.connect(self._on_blending_change)\n self.layer.events.opacity.connect(self._on_opacity_change)\n\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n self.setObjectName('layer')\n self.setMouseTracking(True)\n\n self.grid_layout = QGridLayout(self)\n self.grid_layout.setContentsMargins(0, 0, 0, 0)\n self.grid_layout.setSpacing(2)\n self.grid_layout.setColumnMinimumWidth(0, 86)\n self.grid_layout.setColumnStretch(1, 1)\n self.setLayout(self.grid_layout)\n\n sld = QSlider(Qt.Horizontal, parent=self)\n sld.setFocusPolicy(Qt.NoFocus)\n sld.setMinimum(0)\n sld.setMaximum(100)\n sld.setSingleStep(1)\n sld.valueChanged.connect(self.changeOpacity)\n self.opacitySlider = sld\n self._on_opacity_change()\n\n blend_comboBox = QComboBox(self)\n for index, (data, text) in enumerate(BLENDING_TRANSLATIONS.items()):\n data = data.value\n blend_comboBox.addItem(text, data)\n if data == self.layer.blending:\n blend_comboBox.setCurrentIndex(index)\n\n blend_comboBox.activated[str].connect(self.changeBlending)\n self.blendComboBox = blend_comboBox\n\n def changeOpacity(self, value):\n \"\"\"Change opacity value on the layer model.\n\n Parameters\n ----------\n value : float\n Opacity value for shapes.\n Input range 0 - 100 (transparent to fully opaque).\n \"\"\"\n with self.layer.events.blocker(self._on_opacity_change):\n self.layer.opacity = value / 100\n\n def changeBlending(self, text):\n \"\"\"Change blending mode on the layer model.\n\n Parameters\n ----------\n text : str\n Name of blending mode, eg: 'translucent', 'additive', 'opaque'.\n \"\"\"\n self.layer.blending = self.blendComboBox.currentData()\n\n def _on_opacity_change(self, event=None):\n \"\"\"Receive layer model opacity change event and update opacity slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.opacity.blocker():\n self.opacitySlider.setValue(int(self.layer.opacity * 100))\n\n def _on_blending_change(self, event=None):\n \"\"\"Receive layer model blending mode change event and update slider.\n\n Parameters\n ----------\n event : napari.utils.event.Event, optional\n The napari event that triggered this method, by default None.\n \"\"\"\n with self.layer.events.blending.blocker():\n self.blendComboBox.setCurrentIndex(\n 
self.blendComboBox.findData(self.layer.blending)\n )\n\n def close(self):\n \"\"\"Disconnect events when widget is closing.\"\"\"\n disconnect_events(self.layer.events, self)\n for child in self.children():\n close_method = getattr(child, 'close', None)\n if close_method is not None:\n close_method()\n super().close()\n", "path": "napari/_qt/layer_controls/qt_layer_controls_base.py"}]}
| 1,869 | 461 |
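A standalone sketch (not napari code; assumes `superqt` and a Qt binding such as PyQt5 are installed) of the float slider the patch switches to, configured for the direct 0–1 opacity range so no /100 normalization is needed.

```python
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QApplication
from superqt import QLabeledDoubleSlider

app = QApplication([])

sld = QLabeledDoubleSlider(Qt.Horizontal)
sld.setMinimum(0)        # opacity is expressed directly as a float
sld.setMaximum(1)
sld.setSingleStep(0.01)
sld.setValue(0.5)        # labeled value is now in the 0-1 range
sld.show()

# app.exec_()  # uncomment to keep the widget open interactively
```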
gh_patches_debug_31134
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-1535
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plugin DlProtectCom doesn't work
Trying to download http://www.dl-protect.com/2C964B88 gives the error 'NoneType' object has no attribute 'group' 0.00 B
</issue>
<code>
[start of module/plugins/crypter/DlProtectCom.py]
1 # -*- coding: utf-8 -*-
2
3 import re
4 import time
5
6 from base64 import urlsafe_b64encode
7
8 from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
9
10
11 class DlProtectCom(SimpleCrypter):
12 __name__ = "DlProtectCom"
13 __type__ = "crypter"
14 __version__ = "0.03"
15
16 __pattern__ = r'https?://(?:www\.)?dl-protect\.com/((en|fr)/)?\w+'
17 __config__ = [("use_premium" , "bool", "Use premium account if available" , True),
18 ("use_subfolder" , "bool", "Save package to subfolder" , True),
19 ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
20
21 __description__ = """Dl-protect.com decrypter plugin"""
22 __license__ = "GPLv3"
23 __authors__ = [("Walter Purcaro", "[email protected]")]
24
25
26 COOKIES = [("dl-protect.com", "l", "en")]
27
28 OFFLINE_PATTERN = r'Unfortunately, the link you are looking for is not found'
29
30
31 def getLinks(self):
32 # Direct link with redirect
33 if not re.match(r"https?://(?:www\.)?dl-protect\.com/.+", self.req.http.lastEffectiveURL):
34 return [self.req.http.lastEffectiveURL]
35
36 post_req = {'key' : re.search(r'name="key" value="(.+?)"', self.html).group(1),
37 'submitform': ""}
38
39 if "Please click on continue to see the content" in self.html:
40 post_req['submitform'] = "Continue"
41 self.wait(2)
42
43 else:
44 mstime = int(round(time.time() * 1000))
45 b64time = "_" + urlsafe_b64encode(str(mstime)).replace("=", "%3D")
46
47 post_req.update({'i' : b64time,
48 'submitform': "Decrypt+link"})
49
50 if "Password :" in self.html:
51 post_req['pwd'] = self.getPassword()
52
53 if "Security Code" in self.html:
54 captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
55 captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
56 captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
57
58 post_req['secure'] = captcha_code
59
60 self.html = self.load(self.pyfile.url, post=post_req)
61
62 for errmsg in ("The password is incorrect", "The security code is incorrect"):
63 if errmsg in self.html:
64 self.fail(_(errmsg[1:]))
65
66 return re.findall(r'<a href="([^/].+?)" target="_blank">', self.html)
67
68
69 getInfo = create_getInfo(DlProtectCom)
70
[end of module/plugins/crypter/DlProtectCom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/module/plugins/crypter/DlProtectCom.py b/module/plugins/crypter/DlProtectCom.py
--- a/module/plugins/crypter/DlProtectCom.py
+++ b/module/plugins/crypter/DlProtectCom.py
@@ -11,7 +11,7 @@
class DlProtectCom(SimpleCrypter):
__name__ = "DlProtectCom"
__type__ = "crypter"
- __version__ = "0.03"
+ __version__ = "0.04"
__pattern__ = r'https?://(?:www\.)?dl-protect\.com/((en|fr)/)?\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
@@ -36,7 +36,7 @@
post_req = {'key' : re.search(r'name="key" value="(.+?)"', self.html).group(1),
'submitform': ""}
- if "Please click on continue to see the content" in self.html:
+ if "Please click on continue to see the links" in self.html:
post_req['submitform'] = "Continue"
self.wait(2)
@@ -51,11 +51,10 @@
post_req['pwd'] = self.getPassword()
if "Security Code" in self.html:
- captcha_id = re.search(r'/captcha\.php\?uid=(.+?)"', self.html).group(1)
- captcha_url = "http://www.dl-protect.com/captcha.php?uid=" + captcha_id
- captcha_code = self.decryptCaptcha(captcha_url, imgtype="gif")
-
- post_req['secure'] = captcha_code
+ m = re.search(r'/captcha\.php\?key=(.+?)"', self.html)
+ if m:
+ captcha_code = self.decryptCaptcha("http://www.dl-protect.com/captcha.php?key=" + m.group(1), imgtype="gif")
+ post_req['secure'] = captcha_code
self.html = self.load(self.pyfile.url, post=post_req)
|
{"golden_diff": "diff --git a/module/plugins/crypter/DlProtectCom.py b/module/plugins/crypter/DlProtectCom.py\n--- a/module/plugins/crypter/DlProtectCom.py\n+++ b/module/plugins/crypter/DlProtectCom.py\n@@ -11,7 +11,7 @@\n class DlProtectCom(SimpleCrypter):\n __name__ = \"DlProtectCom\"\n __type__ = \"crypter\"\n- __version__ = \"0.03\"\n+ __version__ = \"0.04\"\n \n __pattern__ = r'https?://(?:www\\.)?dl-protect\\.com/((en|fr)/)?\\w+'\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n@@ -36,7 +36,7 @@\n post_req = {'key' : re.search(r'name=\"key\" value=\"(.+?)\"', self.html).group(1),\n 'submitform': \"\"}\n \n- if \"Please click on continue to see the content\" in self.html:\n+ if \"Please click on continue to see the links\" in self.html:\n post_req['submitform'] = \"Continue\"\n self.wait(2)\n \n@@ -51,11 +51,10 @@\n post_req['pwd'] = self.getPassword()\n \n if \"Security Code\" in self.html:\n- captcha_id = re.search(r'/captcha\\.php\\?uid=(.+?)\"', self.html).group(1)\n- captcha_url = \"http://www.dl-protect.com/captcha.php?uid=\" + captcha_id\n- captcha_code = self.decryptCaptcha(captcha_url, imgtype=\"gif\")\n-\n- post_req['secure'] = captcha_code\n+ m = re.search(r'/captcha\\.php\\?key=(.+?)\"', self.html)\n+ if m:\n+ captcha_code = self.decryptCaptcha(\"http://www.dl-protect.com/captcha.php?key=\" + m.group(1), imgtype=\"gif\")\n+ post_req['secure'] = captcha_code\n \n self.html = self.load(self.pyfile.url, post=post_req)\n", "issue": "Plugin DlProtectCom doesn't work\nTrying to download http://www.dl-protect.com/2C964B88 gives the rror 'NoneType' object has no attribute 'group' 0.00 B\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport time\n\nfrom base64 import urlsafe_b64encode\n\nfrom module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo\n\n\nclass DlProtectCom(SimpleCrypter):\n __name__ = \"DlProtectCom\"\n __type__ = \"crypter\"\n __version__ = \"0.03\"\n\n __pattern__ = r'https?://(?:www\\.)?dl-protect\\.com/((en|fr)/)?\\w+'\n __config__ = [(\"use_premium\" , \"bool\", \"Use premium account if available\" , True),\n (\"use_subfolder\" , \"bool\", \"Save package to subfolder\" , True),\n (\"subfolder_per_pack\", \"bool\", \"Create a subfolder for each package\", True)]\n\n __description__ = \"\"\"Dl-protect.com decrypter plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n COOKIES = [(\"dl-protect.com\", \"l\", \"en\")]\n\n OFFLINE_PATTERN = r'Unfortunately, the link you are looking for is not found'\n\n\n def getLinks(self):\n # Direct link with redirect\n if not re.match(r\"https?://(?:www\\.)?dl-protect\\.com/.+\", self.req.http.lastEffectiveURL):\n return [self.req.http.lastEffectiveURL]\n\n post_req = {'key' : re.search(r'name=\"key\" value=\"(.+?)\"', self.html).group(1),\n 'submitform': \"\"}\n\n if \"Please click on continue to see the content\" in self.html:\n post_req['submitform'] = \"Continue\"\n self.wait(2)\n\n else:\n mstime = int(round(time.time() * 1000))\n b64time = \"_\" + urlsafe_b64encode(str(mstime)).replace(\"=\", \"%3D\")\n\n post_req.update({'i' : b64time,\n 'submitform': \"Decrypt+link\"})\n\n if \"Password :\" in self.html:\n post_req['pwd'] = self.getPassword()\n\n if \"Security Code\" in self.html:\n captcha_id = re.search(r'/captcha\\.php\\?uid=(.+?)\"', self.html).group(1)\n captcha_url = \"http://www.dl-protect.com/captcha.php?uid=\" + captcha_id\n captcha_code = self.decryptCaptcha(captcha_url, 
imgtype=\"gif\")\n\n post_req['secure'] = captcha_code\n\n self.html = self.load(self.pyfile.url, post=post_req)\n\n for errmsg in (\"The password is incorrect\", \"The security code is incorrect\"):\n if errmsg in self.html:\n self.fail(_(errmsg[1:]))\n\n return re.findall(r'<a href=\"([^/].+?)\" target=\"_blank\">', self.html)\n\n\ngetInfo = create_getInfo(DlProtectCom)\n", "path": "module/plugins/crypter/DlProtectCom.py"}]}
| 1,395 | 484 |
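An illustrative snippet (plain Python, not plugin code; the sample HTML string is hypothetical) showing why the original chained `re.search(...).group(1)` raised `'NoneType' object has no attribute 'group'` and how the guarded form used in the fix avoids it.

```python
import re

html = '<img src="/captcha.php?key=abc123">'  # made-up page fragment

# Old style: if the pattern is absent, re.search returns None and .group(1) crashes.
m = re.search(r'/captcha\.php\?uid=(.+?)"', html)
print(m)  # None -> calling m.group(1) here would raise AttributeError

# Patched style: search for the current parameter name and guard the match.
m = re.search(r'/captcha\.php\?key=(.+?)"', html)
if m:
    print(m.group(1))  # abc123
```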
gh_patches_debug_13065
|
rasdani/github-patches
|
git_diff
|
openai__gym-2646
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug Report] AttributeError: 'Discrete' object has no attribute 'start'
**Describe the bug**
Change in https://github.com/openai/gym/pull/2470 introduced a bug when loading pre-trained agents with a previous version of gym.
The fix is probably similar to https://github.com/DLR-RM/stable-baselines3/issues/573 ...
**Code example**
from RL Zoo CI
See https://github.com/DLR-RM/rl-baselines3-zoo/pull/210 (note: the CI now passes because I downgraded to gym 0.21)
and
https://github.com/DLR-RM/rl-baselines3-zoo/runs/5305883843?check_suite_focus=true
```
python enjoy --algo qrdqn --env Acrobot-v1
```
traceback:
```
Loading rl-trained-agents/qrdqn/Acrobot-v1_1/Acrobot-v1.zip
----------------------------- Captured stderr call -----------------------------
/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/save_util.py:166: UserWarning: Could not deserialize object exploration_schedule. Consider using `custom_objects` argument to replace this object.
warnings.warn(
Traceback (most recent call last):
File "/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py", line 248, in <module>
main()
File "/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py", line 178, in main
model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, **kwargs)
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/base_class.py", line 709, in load
check_for_correct_spaces(env, data["observation_space"], data["action_space"])
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/utils.py", line 224, in check_for_correct_spaces
if action_space != env.action_space:
File "/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/gym/spaces/discrete.py", line 50, in __eq__
and self.start == other.start
AttributeError: 'Discrete' object has no attribute 'start'
```
**System Info**
Gym 0.22
**Additional context**
Add any other context about the problem here.
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
</issue>
<code>
[start of gym/spaces/discrete.py]
1 from typing import Optional
2
3 import numpy as np
4 from .space import Space
5
6
7 class Discrete(Space[int]):
8 r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`.
9
10 A start value can be optionally specified to shift the range
11 to :math:`\{ a, a+1, \\dots, a+n-1 \}`.
12
13 Example::
14
15 >>> Discrete(2)
16 >>> Discrete(3, start=-1) # {-1, 0, 1}
17
18 """
19
20 def __init__(self, n: int, seed: Optional[int] = None, start: int = 0):
21 assert n > 0, "n (counts) have to be positive"
22 assert isinstance(start, (int, np.integer))
23 self.n = int(n)
24 self.start = int(start)
25 super().__init__((), np.int64, seed)
26
27 def sample(self) -> int:
28 return self.start + self.np_random.randint(self.n)
29
30 def contains(self, x) -> bool:
31 if isinstance(x, int):
32 as_int = x
33 elif isinstance(x, (np.generic, np.ndarray)) and (
34 x.dtype.char in np.typecodes["AllInteger"] and x.shape == ()
35 ):
36 as_int = int(x) # type: ignore
37 else:
38 return False
39 return self.start <= as_int < self.start + self.n
40
41 def __repr__(self) -> str:
42 if self.start != 0:
43 return "Discrete(%d, start=%d)" % (self.n, self.start)
44 return "Discrete(%d)" % self.n
45
46 def __eq__(self, other) -> bool:
47 return (
48 isinstance(other, Discrete)
49 and self.n == other.n
50 and self.start == other.start
51 )
52
[end of gym/spaces/discrete.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gym/spaces/discrete.py b/gym/spaces/discrete.py
--- a/gym/spaces/discrete.py
+++ b/gym/spaces/discrete.py
@@ -12,7 +12,7 @@
Example::
- >>> Discrete(2)
+ >>> Discrete(2) # {0, 1}
>>> Discrete(3, start=-1) # {-1, 0, 1}
"""
@@ -49,3 +49,17 @@
and self.n == other.n
and self.start == other.start
)
+
+ def __setstate__(self, state):
+ super().__setstate__(state)
+
+ # Don't mutate the original state
+ state = dict(state)
+
+ # Allow for loading of legacy states.
+ # See https://github.com/openai/gym/pull/2470
+ if "start" not in state:
+ state["start"] = 0
+
+ # Update our state
+ self.__dict__.update(state)
|
{"golden_diff": "diff --git a/gym/spaces/discrete.py b/gym/spaces/discrete.py\n--- a/gym/spaces/discrete.py\n+++ b/gym/spaces/discrete.py\n@@ -12,7 +12,7 @@\n \n Example::\n \n- >>> Discrete(2)\n+ >>> Discrete(2) # {0, 1}\n >>> Discrete(3, start=-1) # {-1, 0, 1}\n \n \"\"\"\n@@ -49,3 +49,17 @@\n and self.n == other.n\n and self.start == other.start\n )\n+\n+ def __setstate__(self, state):\n+ super().__setstate__(state)\n+\n+ # Don't mutate the original state\n+ state = dict(state)\n+\n+ # Allow for loading of legacy states.\n+ # See https://github.com/openai/gym/pull/2470\n+ if \"start\" not in state:\n+ state[\"start\"] = 0\n+\n+ # Update our state\n+ self.__dict__.update(state)\n", "issue": "[Bug Report] AttributeError: 'Discrete' object has no attribute 'start'\n**Describe the bug**\r\nChange in https://github.com/openai/gym/pull/2470 introduced a bug when loading pre-trained agents with previous version of gym.\r\n\r\nFix is probably similar to https://github.com/DLR-RM/stable-baselines3/issues/573 ...\r\n\r\n\r\n**Code example**\r\nfrom RL Zoo CI\r\n\r\nSee https://github.com/DLR-RM/rl-baselines3-zoo/pull/210 (note: the CI now passes because I downgraded to gym 0.21)\r\nand\r\nhttps://github.com/DLR-RM/rl-baselines3-zoo/runs/5305883843?check_suite_focus=true\r\n\r\n```\r\npython enjoy --algo qrdqn --env Acrobot-v1\r\n```\r\n\r\ntraceback:\r\n```\r\n Loading rl-trained-agents/qrdqn/Acrobot-v1_1/Acrobot-v1.zip\r\n----------------------------- Captured stderr call -----------------------------\r\n/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/save_util.py:166: UserWarning: Could not deserialize object exploration_schedule. Consider using `custom_objects` argument to replace this object.\r\n warnings.warn(\r\nTraceback (most recent call last):\r\n File \"/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py\", line 248, in <module>\r\n main()\r\n File \"/home/runner/work/rl-baselines3-zoo/rl-baselines3-zoo/enjoy.py\", line 178, in main\r\n model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, **kwargs)\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/base_class.py\", line 709, in load\r\n check_for_correct_spaces(env, data[\"observation_space\"], data[\"action_space\"])\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/stable_baselines3/common/utils.py\", line 224, in check_for_correct_spaces\r\n if action_space != env.action_space:\r\n File \"/opt/hostedtoolcache/Python/3.9.10/x64/lib/python3.9/site-packages/gym/spaces/discrete.py\", line 50, in __eq__\r\n and self.start == other.start\r\nAttributeError: 'Discrete' object has no attribute 'start'\r\n```\r\n\r\n**System Info**\r\nGym 0.22\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\nfrom .space import Space\n\n\nclass Discrete(Space[int]):\n r\"\"\"A discrete space in :math:`\\{ 0, 1, \\\\dots, n-1 \\}`.\n\n A start value can be optionally specified to shift the range\n to :math:`\\{ a, a+1, \\\\dots, a+n-1 \\}`.\n\n Example::\n\n >>> Discrete(2)\n >>> Discrete(3, start=-1) # {-1, 0, 1}\n\n \"\"\"\n\n def __init__(self, n: int, seed: Optional[int] = None, start: int = 0):\n assert n > 0, \"n (counts) have to be 
positive\"\n assert isinstance(start, (int, np.integer))\n self.n = int(n)\n self.start = int(start)\n super().__init__((), np.int64, seed)\n\n def sample(self) -> int:\n return self.start + self.np_random.randint(self.n)\n\n def contains(self, x) -> bool:\n if isinstance(x, int):\n as_int = x\n elif isinstance(x, (np.generic, np.ndarray)) and (\n x.dtype.char in np.typecodes[\"AllInteger\"] and x.shape == ()\n ):\n as_int = int(x) # type: ignore\n else:\n return False\n return self.start <= as_int < self.start + self.n\n\n def __repr__(self) -> str:\n if self.start != 0:\n return \"Discrete(%d, start=%d)\" % (self.n, self.start)\n return \"Discrete(%d)\" % self.n\n\n def __eq__(self, other) -> bool:\n return (\n isinstance(other, Discrete)\n and self.n == other.n\n and self.start == other.start\n )\n", "path": "gym/spaces/discrete.py"}]}
| 1,693 | 244 |
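A toy example (independent of gym) of the backward-compatibility pattern the patch adds: a `__setstate__` hook that fills in a default for an attribute that did not exist when the object was originally pickled.

```python
class Discrete:
    def __init__(self, n, start=0):
        self.n = n
        self.start = start

    def __setstate__(self, state):
        state = dict(state)           # don't mutate the caller's dict
        state.setdefault("start", 0)  # legacy pickles predate "start"
        self.__dict__.update(state)


# Simulate unpickling an object saved by an older version without "start".
legacy = Discrete.__new__(Discrete)
legacy.__setstate__({"n": 3})
print(legacy.n, legacy.start)  # 3 0
```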
gh_patches_debug_34565
|
rasdani/github-patches
|
git_diff
|
pallets__click-1328
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
complex example - misleading name for context
The names `Context` and `pass_context` are misleading in the complex example, since the `Context` defined in the example shares a name with the click `Context`. Maybe a different name such as "Environment" or "Options" would be more appropriate.
</issue>
<code>
[start of examples/complex/complex/commands/cmd_status.py]
1 import click
2 from complex.cli import pass_context
3
4
5 @click.command('status', short_help='Shows file changes.')
6 @pass_context
7 def cli(ctx):
8 """Shows file changes in the current working directory."""
9 ctx.log('Changed files: none')
10 ctx.vlog('bla bla bla, debug info')
11
[end of examples/complex/complex/commands/cmd_status.py]
[start of examples/complex/complex/cli.py]
1 import os
2 import sys
3 import click
4
5
6 CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')
7
8
9 class Context(object):
10
11 def __init__(self):
12 self.verbose = False
13 self.home = os.getcwd()
14
15 def log(self, msg, *args):
16 """Logs a message to stderr."""
17 if args:
18 msg %= args
19 click.echo(msg, file=sys.stderr)
20
21 def vlog(self, msg, *args):
22 """Logs a message to stderr only if verbose is enabled."""
23 if self.verbose:
24 self.log(msg, *args)
25
26
27 pass_context = click.make_pass_decorator(Context, ensure=True)
28 cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
29 'commands'))
30
31
32 class ComplexCLI(click.MultiCommand):
33
34 def list_commands(self, ctx):
35 rv = []
36 for filename in os.listdir(cmd_folder):
37 if filename.endswith('.py') and \
38 filename.startswith('cmd_'):
39 rv.append(filename[4:-3])
40 rv.sort()
41 return rv
42
43 def get_command(self, ctx, name):
44 try:
45 if sys.version_info[0] == 2:
46 name = name.encode('ascii', 'replace')
47 mod = __import__('complex.commands.cmd_' + name,
48 None, None, ['cli'])
49 except ImportError:
50 return
51 return mod.cli
52
53
54 @click.command(cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)
55 @click.option('--home', type=click.Path(exists=True, file_okay=False,
56 resolve_path=True),
57 help='Changes the folder to operate on.')
58 @click.option('-v', '--verbose', is_flag=True,
59 help='Enables verbose mode.')
60 @pass_context
61 def cli(ctx, verbose, home):
62 """A complex command line interface."""
63 ctx.verbose = verbose
64 if home is not None:
65 ctx.home = home
66
[end of examples/complex/complex/cli.py]
[start of examples/complex/complex/commands/cmd_init.py]
1 import click
2 from complex.cli import pass_context
3
4
5 @click.command('init', short_help='Initializes a repo.')
6 @click.argument('path', required=False, type=click.Path(resolve_path=True))
7 @pass_context
8 def cli(ctx, path):
9 """Initializes a repository."""
10 if path is None:
11 path = ctx.home
12 ctx.log('Initialized the repository in %s',
13 click.format_filename(path))
14
[end of examples/complex/complex/commands/cmd_init.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/complex/complex/cli.py b/examples/complex/complex/cli.py
--- a/examples/complex/complex/cli.py
+++ b/examples/complex/complex/cli.py
@@ -6,7 +6,7 @@
CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')
-class Context(object):
+class Environment(object):
def __init__(self):
self.verbose = False
@@ -24,7 +24,7 @@
self.log(msg, *args)
-pass_context = click.make_pass_decorator(Context, ensure=True)
+pass_environment = click.make_pass_decorator(Environment, ensure=True)
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'commands'))
@@ -57,7 +57,7 @@
help='Changes the folder to operate on.')
@click.option('-v', '--verbose', is_flag=True,
help='Enables verbose mode.')
-@pass_context
+@pass_environment
def cli(ctx, verbose, home):
"""A complex command line interface."""
ctx.verbose = verbose
diff --git a/examples/complex/complex/commands/cmd_init.py b/examples/complex/complex/commands/cmd_init.py
--- a/examples/complex/complex/commands/cmd_init.py
+++ b/examples/complex/complex/commands/cmd_init.py
@@ -1,10 +1,10 @@
import click
-from complex.cli import pass_context
+from complex.cli import pass_environment
@click.command('init', short_help='Initializes a repo.')
@click.argument('path', required=False, type=click.Path(resolve_path=True))
-@pass_context
+@pass_environment
def cli(ctx, path):
"""Initializes a repository."""
if path is None:
diff --git a/examples/complex/complex/commands/cmd_status.py b/examples/complex/complex/commands/cmd_status.py
--- a/examples/complex/complex/commands/cmd_status.py
+++ b/examples/complex/complex/commands/cmd_status.py
@@ -1,9 +1,9 @@
import click
-from complex.cli import pass_context
+from complex.cli import pass_environment
@click.command('status', short_help='Shows file changes.')
-@pass_context
+@pass_environment
def cli(ctx):
"""Shows file changes in the current working directory."""
ctx.log('Changed files: none')
|
{"golden_diff": "diff --git a/examples/complex/complex/cli.py b/examples/complex/complex/cli.py\n--- a/examples/complex/complex/cli.py\n+++ b/examples/complex/complex/cli.py\n@@ -6,7 +6,7 @@\n CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')\n \n \n-class Context(object):\n+class Environment(object):\n \n def __init__(self):\n self.verbose = False\n@@ -24,7 +24,7 @@\n self.log(msg, *args)\n \n \n-pass_context = click.make_pass_decorator(Context, ensure=True)\n+pass_environment = click.make_pass_decorator(Environment, ensure=True)\n cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'commands'))\n \n@@ -57,7 +57,7 @@\n help='Changes the folder to operate on.')\n @click.option('-v', '--verbose', is_flag=True,\n help='Enables verbose mode.')\n-@pass_context\n+@pass_environment\n def cli(ctx, verbose, home):\n \"\"\"A complex command line interface.\"\"\"\n ctx.verbose = verbose\ndiff --git a/examples/complex/complex/commands/cmd_init.py b/examples/complex/complex/commands/cmd_init.py\n--- a/examples/complex/complex/commands/cmd_init.py\n+++ b/examples/complex/complex/commands/cmd_init.py\n@@ -1,10 +1,10 @@\n import click\n-from complex.cli import pass_context\n+from complex.cli import pass_environment\n \n \n @click.command('init', short_help='Initializes a repo.')\n @click.argument('path', required=False, type=click.Path(resolve_path=True))\n-@pass_context\n+@pass_environment\n def cli(ctx, path):\n \"\"\"Initializes a repository.\"\"\"\n if path is None:\ndiff --git a/examples/complex/complex/commands/cmd_status.py b/examples/complex/complex/commands/cmd_status.py\n--- a/examples/complex/complex/commands/cmd_status.py\n+++ b/examples/complex/complex/commands/cmd_status.py\n@@ -1,9 +1,9 @@\n import click\n-from complex.cli import pass_context\n+from complex.cli import pass_environment\n \n \n @click.command('status', short_help='Shows file changes.')\n-@pass_context\n+@pass_environment\n def cli(ctx):\n \"\"\"Shows file changes in the current working directory.\"\"\"\n ctx.log('Changed files: none')\n", "issue": "complex example - misleading name for context\nThe name `Context` and `pass_context` are misleading in the complex example, since the `Context` defined in the example shares a name with the click `Context`. 
Maybe a different name such as \"Environment\" or \"Options\" would be more appropriate.\n", "before_files": [{"content": "import click\nfrom complex.cli import pass_context\n\n\[email protected]('status', short_help='Shows file changes.')\n@pass_context\ndef cli(ctx):\n \"\"\"Shows file changes in the current working directory.\"\"\"\n ctx.log('Changed files: none')\n ctx.vlog('bla bla bla, debug info')\n", "path": "examples/complex/complex/commands/cmd_status.py"}, {"content": "import os\nimport sys\nimport click\n\n\nCONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')\n\n\nclass Context(object):\n\n def __init__(self):\n self.verbose = False\n self.home = os.getcwd()\n\n def log(self, msg, *args):\n \"\"\"Logs a message to stderr.\"\"\"\n if args:\n msg %= args\n click.echo(msg, file=sys.stderr)\n\n def vlog(self, msg, *args):\n \"\"\"Logs a message to stderr only if verbose is enabled.\"\"\"\n if self.verbose:\n self.log(msg, *args)\n\n\npass_context = click.make_pass_decorator(Context, ensure=True)\ncmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'commands'))\n\n\nclass ComplexCLI(click.MultiCommand):\n\n def list_commands(self, ctx):\n rv = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and \\\n filename.startswith('cmd_'):\n rv.append(filename[4:-3])\n rv.sort()\n return rv\n\n def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('complex.commands.cmd_' + name,\n None, None, ['cli'])\n except ImportError:\n return\n return mod.cli\n\n\[email protected](cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)\[email protected]('--home', type=click.Path(exists=True, file_okay=False,\n resolve_path=True),\n help='Changes the folder to operate on.')\[email protected]('-v', '--verbose', is_flag=True,\n help='Enables verbose mode.')\n@pass_context\ndef cli(ctx, verbose, home):\n \"\"\"A complex command line interface.\"\"\"\n ctx.verbose = verbose\n if home is not None:\n ctx.home = home\n", "path": "examples/complex/complex/cli.py"}, {"content": "import click\nfrom complex.cli import pass_context\n\n\[email protected]('init', short_help='Initializes a repo.')\[email protected]('path', required=False, type=click.Path(resolve_path=True))\n@pass_context\ndef cli(ctx, path):\n \"\"\"Initializes a repository.\"\"\"\n if path is None:\n path = ctx.home\n ctx.log('Initialized the repository in %s',\n click.format_filename(path))\n", "path": "examples/complex/complex/commands/cmd_init.py"}]}
| 1,366 | 499 |
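A self-contained sketch (separate from the example app) illustrating the distinction the rename is meant to surface: `click.make_pass_decorator` injects your own object — here called `Environment` — which is not the same thing as the `click.Context` that `@click.pass_context` provides.

```python
import click


class Environment:
    """Application state shared between commands (not click's own Context)."""

    def __init__(self):
        self.verbose = False


pass_environment = click.make_pass_decorator(Environment, ensure=True)


@click.command()
@click.option("-v", "--verbose", is_flag=True)
@pass_environment
def cli(env, verbose):
    env.verbose = verbose
    click.echo(f"verbose={env.verbose}")


if __name__ == "__main__":
    cli()
```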
gh_patches_debug_146
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1530
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
doccano init causes a ModuleNotFoundError for chardet
How to reproduce the behaviour
---------
Create a fresh virtualenv in which to test, then install the latest release of doccano from PyPi (v1.4.1):
```
$ virtualenv env
[...virtualenv output removed...]
$ source env/bin/activate
(env) $ pip install doccano
[... main output removed...]
Successfully installed Django-3.2.6 MarkupSafe-2.0.1 PyJWT-2.1.0 amqp-5.0.6 apache-libcloud-3.3.1 asgiref-3.4.1 auto-labeling-pipeline-0.1.21 billiard-3.6.4.0 boto3-1.18.30 botocore-1.21.30 celery-5.1.2 certifi-2021.5.30 cffi-1.14.6 charset-normalizer-2.0.4 click-7.1.2 click-didyoumean-0.0.3 click-plugins-1.1.1 click-repl-0.2.0 colour-0.1.5 conllu-4.4.1 coreapi-2.3.3 coreschema-0.0.4 cryptography-3.4.8 defusedxml-0.7.1 dj-database-url-0.5.0 dj-rest-auth-2.1.11 django-celery-results-2.2.0 django-cors-headers-3.8.0 django-drf-filepond-0.4.0 django-filter-2.4.0 django-polymorphic-3.0.0 django-rest-polymorphic-0.1.9 django-storages-1.11.1 djangorestframework-3.12.4 djangorestframework-csv-2.1.1 djangorestframework-xml-2.0.0 doccano-1.4.1 drf-yasg-1.20.0 ecdsa-0.17.0 environs-9.3.3 et-xmlfile-1.1.0 furl-2.1.2 greenlet-1.1.1 gunicorn-20.1.0 idna-3.2 inflection-0.5.1 itypes-1.2.0 jinja2-3.0.1 jmespath-0.10.0 joblib-1.0.1 kombu-5.1.0 lml-0.1.0 marshmallow-3.13.0 numpy-1.21.2 oauthlib-3.1.1 openpyxl-3.0.7 orderedmultidict-1.0.1 packaging-21.0 prompt-toolkit-3.0.20 pyasn1-0.4.8 pycparser-2.20 pydantic-1.8.2 pyexcel-0.6.6 pyexcel-io-0.6.4 pyexcel-xlsx-0.6.0 pyparsing-2.4.7 python-dateutil-2.8.2 python-dotenv-0.19.0 python-jose-3.3.0 python3-openid-3.2.0 pytz-2021.1 requests-2.26.0 requests-oauthlib-1.3.0 rsa-4.7.2 ruamel.yaml-0.17.14 ruamel.yaml.clib-0.2.6 s3transfer-0.5.0 scikit-learn-0.24.2 scipy-1.7.1 seqeval-1.2.2 shortuuid-1.0.1 six-1.16.0 social-auth-app-django-5.0.0 social-auth-core-4.1.0 sqlalchemy-1.4.23 sqlparse-0.4.1 texttable-1.6.4 threadpoolctl-2.2.0 typing-extensions-3.10.0.0 unicodecsv-0.14.1 uritemplate-3.0.1 urllib3-1.26.6 vine-5.0.0 wcwidth-0.2.5 whitenoise-5.3.0
```
Now run `doccano init`:
```
(env) $ doccano init
```
This results in a set of long stack traces all rooted on [doccano/backend/api/views/upload/dataset.py:L7](https://github.com/doccano/doccano/blob/3bf91c1e30c00693362491932a6aa802235a5f95/backend/api/views/upload/dataset.py#L7) - `import chardet`
```
Traceback (most recent call last):
File "/env/lib/python3.8/site-packages/backend/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/env/lib/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
[...traceback truncated...]
File "/env/lib/python3.8/site-packages/backend/api/urls.py", line 3, in <module>
from . import views
File "/env/lib/python3.8/site-packages/backend/api/views/__init__.py", line 5, in <module>
from .export_dataset import *
File "/env/lib/python3.8/site-packages/backend/api/views/export_dataset.py", line 11, in <module>
from ..tasks import export_dataset
File "/env/lib/python3.8/site-packages/backend/api/tasks.py", line 13, in <module>
from .views.upload.factory import (get_data_class, get_dataset_class,
File "/env/lib/python3.8/site-packages/backend/api/views/upload/factory.py", line 3, in <module>
from . import catalog, data, dataset, label
File "/env/lib/python3.8/site-packages/backend/api/views/upload/dataset.py", line 7, in <module>
import chardet
ModuleNotFoundError: No module named 'chardet'
```
`pip install chardet` resolves the issue and `doccano init` then completes successfully and I'm able to run the app.
Your Environment
---------
* **Operating System:** Tested on both macOS 10.15.7 and Ubuntu 20.04
* **Python Version Used:** 3.8.9 (macOS, via macports), 3.8.10 (Ubuntu)
* **When you install doccano:** 27th Aug 2021 - installing current release from PyPi, v1.4.1
* **How did you install doccano (Heroku button etc):** Installing v1.4.1 from PyPi using `pip install doccano` into a clean python virtualenv.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import io
4 import os
5
6 from setuptools import find_packages, setup
7
8 NAME = 'doccano'
9 DESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'
10 URL = 'https://github.com/doccano/doccano'
11 EMAIL = '[email protected]'
12 AUTHOR = 'Hironsan'
13 LICENSE = 'MIT'
14
15 here = os.path.abspath(os.path.dirname(__file__))
16 with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
17 long_description = '\n' + f.read()
18
19 required = [
20 'apache-libcloud>=3.2.0',
21 'colour>=0.1.5',
22 'conllu>=4.2.2',
23 'dj-database-url>=0.5.0',
24 'django-cors-headers>=3.5.0',
25 'django-filter>=2.4.0',
26 'django-rest-polymorphic>=0.1.9',
27 'djangorestframework-csv>=2.1.0',
28 'djangorestframework-xml>=2.0.0',
29 'drf-yasg>=1.20.0',
30 'environs>=9.2.0',
31 'furl>=2.1.0',
32 'pyexcel>=0.6.6',
33 'pyexcel-xlsx>=0.6.0',
34 'python-jose>=3.2.0',
35 'seqeval>=1.2.2',
36 'social-auth-app-django>=4.0.0',
37 'whitenoise>=5.2.0',
38 'auto-labeling-pipeline>=0.1.12',
39 'celery>=5.0.5',
40 'dj-rest-auth>=2.1.4',
41 'django-celery-results>=2.0.1',
42 'django-drf-filepond>=0.3.0',
43 'sqlalchemy>=1.4.7',
44 'gunicorn>=20.1.0',
45 'waitress>=2.0.0',
46 ]
47
48 setup(
49 name=NAME,
50 use_scm_version=True,
51 setup_requires=['setuptools_scm'],
52 description=DESCRIPTION,
53 long_description=long_description,
54 long_description_content_type='text/markdown',
55 author=AUTHOR,
56 author_email=EMAIL,
57 url=URL,
58 packages=find_packages(exclude=('*.tests',)),
59 entry_points={
60 'console_scripts': [
61 'doccano = backend.cli:main'
62 ]
63 },
64 install_requires=required,
65 extras_require={
66 'postgresql': ['psycopg2-binary>=2.8.6'],
67 'mssql': ['django-mssql-backend>=2.8.1'],
68 },
69 include_package_data=True,
70 license=LICENSE,
71 classifiers=[
72 'License :: OSI Approved :: MIT License',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: 3.8',
77 'Programming Language :: Python :: Implementation :: CPython',
78 'Programming Language :: Python :: Implementation :: PyPy'
79 ],
80 )
81
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,6 +43,8 @@
'sqlalchemy>=1.4.7',
'gunicorn>=20.1.0',
'waitress>=2.0.0',
+ 'pydantic>=1.8.2',
+ 'chardet>=4.0.0'
]
setup(
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,6 +43,8 @@\n 'sqlalchemy>=1.4.7',\n 'gunicorn>=20.1.0',\n 'waitress>=2.0.0',\n+ 'pydantic>=1.8.2',\n+ 'chardet>=4.0.0'\n ]\n \n setup(\n", "issue": "doccano init causes a ModuleNotFoundError for chardet\nHow to reproduce the behaviour\r\n---------\r\n\r\nCreate a fresh virtualenv in which to test, then install the latest release of doccano from PyPi (v1.4.1):\r\n\r\n```\r\n$ virtualenv env\r\n [...virtualenv output removed...]\r\n\r\n$ source env/bin/activate\r\n(env) $ pip install doccano\r\n\r\n [... main output removed...]\r\nSuccessfully installed Django-3.2.6 MarkupSafe-2.0.1 PyJWT-2.1.0 amqp-5.0.6 apache-libcloud-3.3.1 asgiref-3.4.1 auto-labeling-pipeline-0.1.21 billiard-3.6.4.0 boto3-1.18.30 botocore-1.21.30 celery-5.1.2 certifi-2021.5.30 cffi-1.14.6 charset-normalizer-2.0.4 click-7.1.2 click-didyoumean-0.0.3 click-plugins-1.1.1 click-repl-0.2.0 colour-0.1.5 conllu-4.4.1 coreapi-2.3.3 coreschema-0.0.4 cryptography-3.4.8 defusedxml-0.7.1 dj-database-url-0.5.0 dj-rest-auth-2.1.11 django-celery-results-2.2.0 django-cors-headers-3.8.0 django-drf-filepond-0.4.0 django-filter-2.4.0 django-polymorphic-3.0.0 django-rest-polymorphic-0.1.9 django-storages-1.11.1 djangorestframework-3.12.4 djangorestframework-csv-2.1.1 djangorestframework-xml-2.0.0 doccano-1.4.1 drf-yasg-1.20.0 ecdsa-0.17.0 environs-9.3.3 et-xmlfile-1.1.0 furl-2.1.2 greenlet-1.1.1 gunicorn-20.1.0 idna-3.2 inflection-0.5.1 itypes-1.2.0 jinja2-3.0.1 jmespath-0.10.0 joblib-1.0.1 kombu-5.1.0 lml-0.1.0 marshmallow-3.13.0 numpy-1.21.2 oauthlib-3.1.1 openpyxl-3.0.7 orderedmultidict-1.0.1 packaging-21.0 prompt-toolkit-3.0.20 pyasn1-0.4.8 pycparser-2.20 pydantic-1.8.2 pyexcel-0.6.6 pyexcel-io-0.6.4 pyexcel-xlsx-0.6.0 pyparsing-2.4.7 python-dateutil-2.8.2 python-dotenv-0.19.0 python-jose-3.3.0 python3-openid-3.2.0 pytz-2021.1 requests-2.26.0 requests-oauthlib-1.3.0 rsa-4.7.2 ruamel.yaml-0.17.14 ruamel.yaml.clib-0.2.6 s3transfer-0.5.0 scikit-learn-0.24.2 scipy-1.7.1 seqeval-1.2.2 shortuuid-1.0.1 six-1.16.0 social-auth-app-django-5.0.0 social-auth-core-4.1.0 sqlalchemy-1.4.23 sqlparse-0.4.1 texttable-1.6.4 threadpoolctl-2.2.0 typing-extensions-3.10.0.0 unicodecsv-0.14.1 uritemplate-3.0.1 urllib3-1.26.6 vine-5.0.0 wcwidth-0.2.5 whitenoise-5.3.0\r\n```\r\n\r\nNow run `doccano init`:\r\n```\r\n(env) $ doccano init\r\n```\r\n\r\nThis results in a set of long stack traces all rooted on [doccano/backend/api/views/upload/dataset.py:L7](https://github.com/doccano/doccano/blob/3bf91c1e30c00693362491932a6aa802235a5f95/backend/api/views/upload/dataset.py#L7) - `import chardet`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/env/lib/python3.8/site-packages/backend/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/env/lib/python3.8/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/env/lib/python3.8/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/env/lib/python3.8/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n\r\n[...traceback truncated...]\r\n\r\n File \"/env/lib/python3.8/site-packages/backend/api/urls.py\", line 3, in <module>\r\n from . 
import views\r\n File \"/env/lib/python3.8/site-packages/backend/api/views/__init__.py\", line 5, in <module>\r\n from .export_dataset import *\r\n File \"/env/lib/python3.8/site-packages/backend/api/views/export_dataset.py\", line 11, in <module>\r\n from ..tasks import export_dataset\r\n File \"/env/lib/python3.8/site-packages/backend/api/tasks.py\", line 13, in <module>\r\n from .views.upload.factory import (get_data_class, get_dataset_class,\r\n File \"/env/lib/python3.8/site-packages/backend/api/views/upload/factory.py\", line 3, in <module>\r\n from . import catalog, data, dataset, label\r\n File \"/env/lib/python3.8/site-packages/backend/api/views/upload/dataset.py\", line 7, in <module>\r\n import chardet\r\nModuleNotFoundError: No module named 'chardet'\r\n```\r\n\r\n`pip install chardet` resolves the issue and `doccano init` then completes successfully and I'm able to run the app. \r\n\r\nYour Environment\r\n---------\r\n\r\n* **Operating System:** Tested on both macOS 10.15.7 and Ubuntu 20.04\r\n* **Python Version Used:** 3.8.9 (macOS, via macports), 3.8.10 (Ubuntu)\r\n* **When you install doccano:** 27th Aug 2021 - installing current release from PyPi, v1.4.1\r\n* **How did you install doccano (Heroku button etc):** Installing v1.4.1 from PyPi using `pip install doccano` into a clean python virtualenv.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0',\n 'auto-labeling-pipeline>=0.1.12',\n 'celery>=5.0.5',\n 'dj-rest-auth>=2.1.4',\n 'django-celery-results>=2.0.1',\n 'django-drf-filepond>=0.3.0',\n 'sqlalchemy>=1.4.7',\n 'gunicorn>=20.1.0',\n 'waitress>=2.0.0',\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = backend.cli:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py"}]}
| 3,010 | 96 |
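The setup.py diff above adds `pydantic>=1.8.2` and `chardet>=4.0.0` to the install requirements. A minimal import check along these lines reproduces the failure mode described in the issue when either package is missing (a sketch, assuming a plain Python 3 interpreter; the loop and messages are illustrative and not part of doccano itself):

```python
# Sketch: verify that the two runtime dependencies added by the diff above are
# importable; if 'chardet' is absent, `doccano init` fails with the
# ModuleNotFoundError shown in the issue traceback.
import importlib

for module_name in ("pydantic", "chardet"):
    try:
        importlib.import_module(module_name)
        print(f"{module_name}: importable")
    except ModuleNotFoundError as exc:
        print(f"{module_name}: missing ({exc})")
```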
gh_patches_debug_249
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-3790
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The aws-cli bundle package uses an insecure version of PyYAML
### awscli version:<br>
`aws-cli/1.16.52 Python/2.7.15 Linux/4.14.77-69.57.amzn1.x86_64 exec-env/AWS_ECS_EC2 botocore/1.12.42`
[NVD entry](https://nvd.nist.gov/vuln/detail/CVE-2017-18342)
This issue was found when vulnerability alerts started appearing in Twistlock in response to scans of Docker images that we are using in several applications. The generic error found in these outlines is as such:<br>
```
Impacted versions: <=3.13
In PyYAML before 4.1, the yaml.load() API could execute arbitrary code. In other words, yaml.safe_load is not used.
```
These images are not natively using PyYAML, so this led us to a Docker `RUN` line in a Dockerfile that executed a script that contains a line of code that executes the installation of the `aws-cli` bundle using the following URL:<br>
`https://s3.amazonaws.com/aws-cli/awscli-bundle.zip`
Unpacking this archive shows a list of package dependencies that includes the vulnerable version of PyYAML:<br>
`awscli-bundle/packages/PyYAML-3.13.tar.gz`
The latest (and actually secure) version of PyYAML appears to be 4.1 according to the developer via the [GitHub repo](https://github.com/yaml/pyyaml).
### Request
Is it possible to have the patched version of PyYAML added to this bundle to avoid this vulnerability?
Thank you!
</issue>
<code>
[start of awscli/customizations/ecs/filehelpers.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import json
15 import yaml
16
17 from awscli.customizations.ecs import exceptions
18
19 MAX_CHAR_LENGTH = 46
20 APP_PREFIX = 'AppECS-'
21 DGP_PREFIX = 'DgpECS-'
22
23
24 def find_required_key(resource_name, obj, key):
25
26 if obj is None:
27 raise exceptions.MissingPropertyError(
28 resource=resource_name, prop_name=key)
29
30 result = _get_case_insensitive_key(obj, key)
31
32 if result is None:
33 raise exceptions.MissingPropertyError(
34 resource=resource_name, prop_name=key)
35 else:
36 return result
37
38
39 def _get_case_insensitive_key(target_obj, target_key):
40 key_to_match = target_key.lower()
41 key_list = target_obj.keys()
42
43 for key in key_list:
44 if key.lower() == key_to_match:
45 return key
46
47
48 def get_app_name(service, cluster, app_value):
49 if app_value is not None:
50 return app_value
51 else:
52 suffix = _get_ecs_suffix(service, cluster)
53 return APP_PREFIX + suffix
54
55
56 def get_cluster_name_from_arn(arn):
57 return arn.split('/')[1]
58
59
60 def get_deploy_group_name(service, cluster, dg_value):
61 if dg_value is not None:
62 return dg_value
63 else:
64 suffix = _get_ecs_suffix(service, cluster)
65 return DGP_PREFIX + suffix
66
67
68 def _get_ecs_suffix(service, cluster):
69 if cluster is None:
70 cluster_name = 'default'
71 else:
72 cluster_name = cluster[:MAX_CHAR_LENGTH]
73
74 return cluster_name + '-' + service[:MAX_CHAR_LENGTH]
75
76
77 def parse_appspec(appspec_str):
78 try:
79 return json.loads(appspec_str)
80 except ValueError:
81 return yaml.load(appspec_str)
82
[end of awscli/customizations/ecs/filehelpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py
--- a/awscli/customizations/ecs/filehelpers.py
+++ b/awscli/customizations/ecs/filehelpers.py
@@ -78,4 +78,4 @@
try:
return json.loads(appspec_str)
except ValueError:
- return yaml.load(appspec_str)
+ return yaml.safe_load(appspec_str)
|
{"golden_diff": "diff --git a/awscli/customizations/ecs/filehelpers.py b/awscli/customizations/ecs/filehelpers.py\n--- a/awscli/customizations/ecs/filehelpers.py\n+++ b/awscli/customizations/ecs/filehelpers.py\n@@ -78,4 +78,4 @@\n try:\n return json.loads(appspec_str)\n except ValueError:\n- return yaml.load(appspec_str)\n+ return yaml.safe_load(appspec_str)\n", "issue": "The aws-cli bundle package uses an insecure version of PyYAML\n### awscli version:<br>\r\n`aws-cli/1.16.52 Python/2.7.15 Linux/4.14.77-69.57.amzn1.x86_64 exec-env/AWS_ECS_EC2 botocore/1.12.42`\r\n\r\n[NVD entry](https://nvd.nist.gov/vuln/detail/CVE-2017-18342)\r\n\r\nThis issue was found when vulnerability alerts started appearing in Twistlock in response to scans of Docker images that we are using in several applications. The generic error found in these outlines is as such:<br>\r\n\r\n```\r\nImpacted versions: <=3.13\r\nIn PyYAML before 4.1, the yaml.load() API could execute arbitrary code. In other words, yaml.safe_load is not used.\r\n```\r\n\r\nThese images are not natively using PyYAML, so this led us to a Docker `RUN` line in a Dockerfile that executed a script that contains a line of code that executes the installation of the `aws-cli` bundle using the following URL:<br>\r\n\r\n`https://s3.amazonaws.com/aws-cli/awscli-bundle.zip`\r\n\r\nUnpacking this archive shows a list of package dependencies that includes the vulnerable version of PyYAML:<br>\r\n\r\n`awscli-bundle/packages/PyYAML-3.13.tar.gz`\r\n\r\nThe latest (and actually secure) version of PyYAML appears to be 4.1 according to the developer via the [GitHub repo](https://github.com/yaml/pyyaml).\r\n\r\n### Request\r\n\r\nIs it possible to have the patched version of PyYAML added to this bundle to avoid this vulnerability?\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport json\nimport yaml\n\nfrom awscli.customizations.ecs import exceptions\n\nMAX_CHAR_LENGTH = 46\nAPP_PREFIX = 'AppECS-'\nDGP_PREFIX = 'DgpECS-'\n\n\ndef find_required_key(resource_name, obj, key):\n\n if obj is None:\n raise exceptions.MissingPropertyError(\n resource=resource_name, prop_name=key)\n\n result = _get_case_insensitive_key(obj, key)\n\n if result is None:\n raise exceptions.MissingPropertyError(\n resource=resource_name, prop_name=key)\n else:\n return result\n\n\ndef _get_case_insensitive_key(target_obj, target_key):\n key_to_match = target_key.lower()\n key_list = target_obj.keys()\n\n for key in key_list:\n if key.lower() == key_to_match:\n return key\n\n\ndef get_app_name(service, cluster, app_value):\n if app_value is not None:\n return app_value\n else:\n suffix = _get_ecs_suffix(service, cluster)\n return APP_PREFIX + suffix\n\n\ndef get_cluster_name_from_arn(arn):\n return arn.split('/')[1]\n\n\ndef get_deploy_group_name(service, cluster, dg_value):\n if dg_value is not None:\n return dg_value\n else:\n suffix = _get_ecs_suffix(service, cluster)\n return DGP_PREFIX + suffix\n\n\ndef _get_ecs_suffix(service, cluster):\n if cluster is None:\n cluster_name = 'default'\n else:\n cluster_name = cluster[:MAX_CHAR_LENGTH]\n\n return cluster_name + '-' + service[:MAX_CHAR_LENGTH]\n\n\ndef parse_appspec(appspec_str):\n try:\n return json.loads(appspec_str)\n except ValueError:\n return yaml.load(appspec_str)\n", "path": "awscli/customizations/ecs/filehelpers.py"}]}
| 1,598 | 94 |
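The patch above replaces `yaml.load` with `yaml.safe_load` in the ECS appspec parser. A self-contained sketch of the patched behaviour, assuming PyYAML is installed (the sample inputs are illustrative):

```python
# JSON is tried first; YAML parsing falls back to yaml.safe_load so untrusted
# appspec text cannot trigger arbitrary code execution (CVE-2017-18342).
import json

import yaml


def parse_appspec(appspec_str):
    try:
        return json.loads(appspec_str)
    except ValueError:
        return yaml.safe_load(appspec_str)


print(parse_appspec('{"version": 0.0}'))  # parsed as JSON
print(parse_appspec("version: 0.0"))      # parsed as YAML
```

`safe_load` restricts construction to plain Python objects, which is why it closes the code-execution vector that plain `load` leaves open in PyYAML < 4.1.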
gh_patches_debug_9773
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-5573
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] tcl/8.6.10: tcl should be dependent to CoreFoundation on mac
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
-->
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **tcl/8.6.10**
* Operating System+version: **macOS 10.15.7**
* Compiler+version: **Apple-Clang**
* Conan version: **conan 1.36.0**
* Python version: **Python 3.9.5**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Macos
os_build=Macos
arch=x86_64
arch_build=x86_64
compiler=apple-clang
compiler.version=12.0
compiler.libcxx=libc++
build_type=Release
[options]
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
install tcl/8.6.10
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
$ otool -L ~/.conan/data/tcl/8.6.10/_/_/package/88955cec2844f731470e07bd44ce5a3a24ec88b7/bin/tclsh8.6
/Users/shiena/.conan/data/tcl/8.6.10/_/_/package/88955cec2844f731470e07bd44ce5a3a24ec88b7/bin/tclsh8.6:
/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1281.100.1)
/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 1677.104.0)
```
</details>
tcl links to CoreFoundation on mac. But conanfile.py depends on Cocoa. Therefore tcl should be dependent to CoreFoundation.
https://github.com/tcltk/tcl/blob/main/unix/configure#L1455
https://github.com/conan-io/conan-center-index/blob/master/recipes/tcl/8.6.10/conanfile.py#L209
</issue>
<code>
[start of recipes/tcl/8.6.10/conanfile.py]
1 from conans import ConanFile, AutoToolsBuildEnvironment, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5
6 class TclConan(ConanFile):
7 name = "tcl"
8 version = "8.6.10"
9 description = "Tcl is a very powerful but easy to learn dynamic programming language."
10 topics = ("conan", "tcl", "scripting", "programming")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://tcl.tk"
13 license = "TCL"
14 settings = "os", "compiler", "build_type", "arch"
15 options = {
16 "fPIC": [True, False],
17 "shared": [True, False]
18 }
19 default_options = {
20 "fPIC": True,
21 "shared": False,
22 }
23 exports_sources = ("patches/*")
24 requires = ("zlib/1.2.11")
25
26 _autotools = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def config_options(self):
33 if self.settings.os == "Windows":
34 del self.options.fPIC
35
36 def configure(self):
37 if self.settings.os not in ("Linux", "Macos", "Windows"):
38 raise ConanInvalidConfiguration("Unsupported os")
39 if self.options.shared:
40 del self.options.fPIC
41 del self.settings.compiler.libcxx
42 del self.settings.compiler.cppstd
43
44 def build_requirements(self):
45 if tools.os_info.is_windows and self.settings.compiler != "Visual Studio" and \
46 "CONAN_BASH_PATH" not in os.environ and tools.os_info.detect_windows_subsystem() != "msys2":
47 self.build_requires("msys2/20190524")
48
49 def source(self):
50 tools.get(**self.conan_data["sources"][self.version])
51 extracted_dir = self.name + self.version
52 os.rename(extracted_dir, self._source_subfolder)
53
54 def _get_default_build_system_subdir(self):
55 return {
56 "Macos": "macosx",
57 "Linux": "unix",
58 "Windows": "win",
59 }[str(self.settings.os)]
60
61 def _get_configure_dir(self, build_system_subdir=None):
62 if build_system_subdir is None:
63 build_system_subdir = self._get_default_build_system_subdir()
64 return os.path.join(self.source_folder, self._source_subfolder, build_system_subdir)
65
66 def _patch_sources(self):
67 unix_config_dir = self._get_configure_dir("unix")
68 # When disabling 64-bit support (in 32-bit), this test must be 0 in order to use "long long" for 64-bit ints
69 # (${tcl_type_64bit} can be either "__int64" or "long long")
70 tools.replace_in_file(os.path.join(unix_config_dir, "configure"),
71 "(sizeof(${tcl_type_64bit})==sizeof(long))",
72 "(sizeof(${tcl_type_64bit})!=sizeof(long))")
73
74 unix_makefile_in = os.path.join(unix_config_dir, "Makefile.in")
75 # Avoid building internal libraries as shared libraries
76 tools.replace_in_file(unix_makefile_in, "--enable-shared --enable-threads", "--enable-threads")
77 # Avoid clearing CFLAGS and LDFLAGS in the makefile
78 tools.replace_in_file(unix_makefile_in, "\nCFLAGS\t", "\n#CFLAGS\t")
79 tools.replace_in_file(unix_makefile_in, "\nLDFLAGS\t", "\n#LDFLAGS\t")
80 # Use CFLAGS and CPPFLAGS as argument to CC
81 tools.replace_in_file(unix_makefile_in, "${CFLAGS}", "${CFLAGS} ${CPPFLAGS}")
82 # nmake creates a temporary file with mixed forward/backward slashes
83 # force the filename to avoid cryptic error messages
84 win_config_dir = self._get_configure_dir("win")
85 win_makefile_vc = os.path.join(win_config_dir, "makefile.vc")
86 tools.replace_in_file(win_makefile_vc, "@type << >$@", "type <<temp.tmp >$@")
87
88 win_rules_vc = os.path.join(self._source_subfolder, "win", "rules.vc")
89 # do not treat nmake build warnings as errors
90 tools.replace_in_file(win_rules_vc, "cwarn = $(cwarn) -WX", "")
91 # disable whole program optimization to be portable across different MSVC versions.
92 # See conan-io/conan-center-index#4811 conan-io/conan-center-index#4094
93 tools.replace_in_file(
94 win_rules_vc,
95 "OPTIMIZATIONS = $(OPTIMIZATIONS) -GL",
96 "")
97
98 def _build_nmake(self, targets):
99 opts = []
100 # https://core.tcl.tk/tips/doc/trunk/tip/477.md
101 if not self.options.shared:
102 opts.append("static")
103 if self.settings.build_type == "Debug":
104 opts.append("symbols")
105 if "MD" in self.settings.compiler.runtime:
106 opts.append("msvcrt")
107 else:
108 opts.append("nomsvcrt")
109 if "d" not in self.settings.compiler.runtime:
110 opts.append("unchecked")
111 with tools.vcvars(self.settings):
112 with tools.chdir(self._get_configure_dir("win")):
113 self.run('nmake -nologo -f "{cfgdir}/makefile.vc" INSTALLDIR="{pkgdir}" OPTS={opts} {targets}'.format(
114 cfgdir=self._get_configure_dir("win"),
115 pkgdir=self.package_folder,
116 opts=",".join(opts),
117 targets=" ".join(targets),
118 ))
119
120 def _configure_autotools(self):
121 if self._autotools:
122 return self._autotools
123 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
124 conf_args = [
125 "--enable-threads",
126 "--enable-shared" if self.options.shared else "--disable-shared",
127 "--enable-symbols" if self.settings.build_type == "Debug" else "--disable-symbols",
128 "--enable-64bit" if self.settings.arch == "x86_64" else "--disable-64bit",
129 ]
130 self._autotools.configure(configure_dir=self._get_configure_dir(), args=conf_args, vars={"PKG_CFG_ARGS": " ".join(conf_args)})
131
132 # https://core.tcl.tk/tcl/tktview/840660e5a1
133 for root, _, files in os.walk(self.build_folder):
134 if "Makefile" in files:
135 tools.replace_in_file(os.path.join(root, "Makefile"), "-Dstrtod=fixstrtod", "", strict=False)
136 return self._autotools
137
138 def build(self):
139 for patch in self.conan_data["patches"][self.version]:
140 tools.patch(**patch)
141 self._patch_sources()
142 if self.settings.compiler == "Visual Studio":
143 self._build_nmake(["release"])
144 else:
145 autotools = self._configure_autotools()
146 autotools.make()
147
148 def package(self):
149 self.copy(pattern="license.terms", dst="licenses", src=self._source_subfolder)
150 if self.settings.compiler == "Visual Studio":
151 self._build_nmake(["install-binaries", "install-libraries"])
152 else:
153 autotools = self._configure_autotools()
154 autotools.install()
155 autotools.make(target="install-private-headers")
156
157 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
158 tools.rmdir(os.path.join(self.package_folder, "man"))
159 tools.rmdir(os.path.join(self.package_folder, "share"))
160
161 tclConfigShPath = os.path.join(self.package_folder, "lib", "tclConfig.sh")
162 package_path = self.package_folder
163 build_folder = self.build_folder
164 if self.settings.os == "Windows" and self.settings.compiler != "Visual Studio":
165 package_path = package_path.replace("\\", "/")
166 drive, path = os.path.splitdrive(self.build_folder)
167 build_folder = "".join([drive, path.lower().replace("\\", "/")])
168
169 tools.replace_in_file(tclConfigShPath,
170 package_path,
171 "${TCL_ROOT}")
172 tools.replace_in_file(tclConfigShPath,
173 build_folder,
174 "${TCL_BUILD_ROOT}")
175
176 tools.replace_in_file(tclConfigShPath,
177 "\nTCL_BUILD_",
178 "\n#TCL_BUILD_")
179 tools.replace_in_file(tclConfigShPath,
180 "\nTCL_SRC_DIR",
181 "\n#TCL_SRC_DIR")
182
183 def package_info(self):
184 libs = []
185 systemlibs = []
186 libdirs = []
187 for root, _, _ in os.walk(os.path.join(self.package_folder, "lib"), topdown=False):
188 newlibs = tools.collect_libs(self, root)
189 if newlibs:
190 libs.extend(newlibs)
191 libdirs.append(root)
192 if self.settings.os == "Windows":
193 systemlibs.extend(["ws2_32", "netapi32", "userenv"])
194 else:
195 systemlibs.extend(["m", "pthread", "dl"])
196
197 defines = []
198 if not self.options.shared:
199 defines.append("STATIC_BUILD")
200 self.cpp_info.defines = defines
201
202 self.cpp_info.libdirs = libdirs
203 self.cpp_info.libs = libs
204 self.cpp_info.system_libs = systemlibs
205 self.cpp_info.names["cmake_find_package"] = "TCL"
206 self.cpp_info.names["cmake_find_package_multi"] = "TCL"
207
208 if self.settings.os == "Macos":
209 self.cpp_info.frameworks = ["Cocoa"]
210 self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags
211
212 tcl_library = os.path.join(self.package_folder, "lib", "{}{}".format(self.name, ".".join(self.version.split(".")[:2])))
213 self.output.info("Setting TCL_LIBRARY environment variable to {}".format(tcl_library))
214 self.env_info.TCL_LIBRARY = tcl_library
215
216 tcl_root = self.package_folder
217 self.output.info("Setting TCL_ROOT environment variable to {}".format(tcl_root))
218 self.env_info.TCL_ROOT = tcl_root
219
220 tclsh_list = list(filter(lambda fn: fn.startswith("tclsh"), os.listdir(os.path.join(self.package_folder, "bin"))))
221 tclsh = os.path.join(self.package_folder, "bin", tclsh_list[0])
222 self.output.info("Setting TCLSH environment variable to {}".format(tclsh))
223 self.env_info.TCLSH = tclsh
224
225 bindir = os.path.join(self.package_folder, "bin")
226 self.output.info("Adding PATH environment variable: {}".format(bindir))
227 self.env_info.PATH.append(bindir)
228
[end of recipes/tcl/8.6.10/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/tcl/8.6.10/conanfile.py b/recipes/tcl/8.6.10/conanfile.py
--- a/recipes/tcl/8.6.10/conanfile.py
+++ b/recipes/tcl/8.6.10/conanfile.py
@@ -206,7 +206,7 @@
self.cpp_info.names["cmake_find_package_multi"] = "TCL"
if self.settings.os == "Macos":
- self.cpp_info.frameworks = ["Cocoa"]
+ self.cpp_info.frameworks = ["CoreFoundation"]
self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags
tcl_library = os.path.join(self.package_folder, "lib", "{}{}".format(self.name, ".".join(self.version.split(".")[:2])))
|
{"golden_diff": "diff --git a/recipes/tcl/8.6.10/conanfile.py b/recipes/tcl/8.6.10/conanfile.py\n--- a/recipes/tcl/8.6.10/conanfile.py\n+++ b/recipes/tcl/8.6.10/conanfile.py\n@@ -206,7 +206,7 @@\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"TCL\"\n \n if self.settings.os == \"Macos\":\n- self.cpp_info.frameworks = [\"Cocoa\"]\n+ self.cpp_info.frameworks = [\"CoreFoundation\"]\n self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags\n \n tcl_library = os.path.join(self.package_folder, \"lib\", \"{}{}\".format(self.name, \".\".join(self.version.split(\".\")[:2])))\n", "issue": "[package] tcl/8.6.10: tcl should be dependent to CoreFoundation on mac\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **tcl/8.6.10**\r\n * Operating System+version: **macOS 10.15.7**\r\n * Compiler+version: **Apple-Clang**\r\n * Conan version: **conan 1.36.0**\r\n * Python version: **Python 3.9.5**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Macos\r\nos_build=Macos\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=apple-clang\r\ncompiler.version=12.0\r\ncompiler.libcxx=libc++\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\ninstall tcl/8.6.10\r\n\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\n$ otool -L ~/.conan/data/tcl/8.6.10/_/_/package/88955cec2844f731470e07bd44ce5a3a24ec88b7/bin/tclsh8.6 \r\n/Users/shiena/.conan/data/tcl/8.6.10/_/_/package/88955cec2844f731470e07bd44ce5a3a24ec88b7/bin/tclsh8.6:\r\n\t/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1281.100.1)\r\n\t/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 1677.104.0)\r\n```\r\n\r\n</details>\r\n\r\ntcl links to CoreFoundation on mac. But conanfile.py depends on Cocoa. 
Therefore tcl should be dependent to CoreFoundation.\r\n\r\nhttps://github.com/tcltk/tcl/blob/main/unix/configure#L1455\r\nhttps://github.com/conan-io/conan-center-index/blob/master/recipes/tcl/8.6.10/conanfile.py#L209\r\n\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass TclConan(ConanFile):\n name = \"tcl\"\n version = \"8.6.10\"\n description = \"Tcl is a very powerful but easy to learn dynamic programming language.\"\n topics = (\"conan\", \"tcl\", \"scripting\", \"programming\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://tcl.tk\"\n license = \"TCL\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"fPIC\": [True, False],\n \"shared\": [True, False]\n }\n default_options = {\n \"fPIC\": True,\n \"shared\": False,\n }\n exports_sources = (\"patches/*\")\n requires = (\"zlib/1.2.11\")\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.settings.os not in (\"Linux\", \"Macos\", \"Windows\"):\n raise ConanInvalidConfiguration(\"Unsupported os\")\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n if tools.os_info.is_windows and self.settings.compiler != \"Visual Studio\" and \\\n \"CONAN_BASH_PATH\" not in os.environ and tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/20190524\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _get_default_build_system_subdir(self):\n return {\n \"Macos\": \"macosx\",\n \"Linux\": \"unix\",\n \"Windows\": \"win\",\n }[str(self.settings.os)]\n\n def _get_configure_dir(self, build_system_subdir=None):\n if build_system_subdir is None:\n build_system_subdir = self._get_default_build_system_subdir()\n return os.path.join(self.source_folder, self._source_subfolder, build_system_subdir)\n\n def _patch_sources(self):\n unix_config_dir = self._get_configure_dir(\"unix\")\n # When disabling 64-bit support (in 32-bit), this test must be 0 in order to use \"long long\" for 64-bit ints\n # (${tcl_type_64bit} can be either \"__int64\" or \"long long\")\n tools.replace_in_file(os.path.join(unix_config_dir, \"configure\"),\n \"(sizeof(${tcl_type_64bit})==sizeof(long))\",\n \"(sizeof(${tcl_type_64bit})!=sizeof(long))\")\n\n unix_makefile_in = os.path.join(unix_config_dir, \"Makefile.in\")\n # Avoid building internal libraries as shared libraries\n tools.replace_in_file(unix_makefile_in, \"--enable-shared --enable-threads\", \"--enable-threads\")\n # Avoid clearing CFLAGS and LDFLAGS in the makefile\n tools.replace_in_file(unix_makefile_in, \"\\nCFLAGS\\t\", \"\\n#CFLAGS\\t\")\n tools.replace_in_file(unix_makefile_in, \"\\nLDFLAGS\\t\", \"\\n#LDFLAGS\\t\")\n # Use CFLAGS and CPPFLAGS as argument to CC\n tools.replace_in_file(unix_makefile_in, \"${CFLAGS}\", \"${CFLAGS} ${CPPFLAGS}\")\n # nmake creates a temporary file with mixed forward/backward slashes\n # force the filename to avoid cryptic error messages\n win_config_dir = self._get_configure_dir(\"win\")\n win_makefile_vc = os.path.join(win_config_dir, \"makefile.vc\")\n 
tools.replace_in_file(win_makefile_vc, \"@type << >$@\", \"type <<temp.tmp >$@\")\n\n win_rules_vc = os.path.join(self._source_subfolder, \"win\", \"rules.vc\")\n # do not treat nmake build warnings as errors\n tools.replace_in_file(win_rules_vc, \"cwarn = $(cwarn) -WX\", \"\")\n # disable whole program optimization to be portable across different MSVC versions.\n # See conan-io/conan-center-index#4811 conan-io/conan-center-index#4094\n tools.replace_in_file(\n win_rules_vc,\n \"OPTIMIZATIONS = $(OPTIMIZATIONS) -GL\",\n \"\")\n\n def _build_nmake(self, targets):\n opts = []\n # https://core.tcl.tk/tips/doc/trunk/tip/477.md\n if not self.options.shared:\n opts.append(\"static\")\n if self.settings.build_type == \"Debug\":\n opts.append(\"symbols\")\n if \"MD\" in self.settings.compiler.runtime:\n opts.append(\"msvcrt\")\n else:\n opts.append(\"nomsvcrt\")\n if \"d\" not in self.settings.compiler.runtime:\n opts.append(\"unchecked\")\n with tools.vcvars(self.settings):\n with tools.chdir(self._get_configure_dir(\"win\")):\n self.run('nmake -nologo -f \"{cfgdir}/makefile.vc\" INSTALLDIR=\"{pkgdir}\" OPTS={opts} {targets}'.format(\n cfgdir=self._get_configure_dir(\"win\"),\n pkgdir=self.package_folder,\n opts=\",\".join(opts),\n targets=\" \".join(targets),\n ))\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n conf_args = [\n \"--enable-threads\",\n \"--enable-shared\" if self.options.shared else \"--disable-shared\",\n \"--enable-symbols\" if self.settings.build_type == \"Debug\" else \"--disable-symbols\",\n \"--enable-64bit\" if self.settings.arch == \"x86_64\" else \"--disable-64bit\",\n ]\n self._autotools.configure(configure_dir=self._get_configure_dir(), args=conf_args, vars={\"PKG_CFG_ARGS\": \" \".join(conf_args)})\n\n # https://core.tcl.tk/tcl/tktview/840660e5a1\n for root, _, files in os.walk(self.build_folder):\n if \"Makefile\" in files:\n tools.replace_in_file(os.path.join(root, \"Makefile\"), \"-Dstrtod=fixstrtod\", \"\", strict=False)\n return self._autotools\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n self._patch_sources()\n if self.settings.compiler == \"Visual Studio\":\n self._build_nmake([\"release\"])\n else:\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"license.terms\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n self._build_nmake([\"install-binaries\", \"install-libraries\"])\n else:\n autotools = self._configure_autotools()\n autotools.install()\n autotools.make(target=\"install-private-headers\")\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"man\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n tclConfigShPath = os.path.join(self.package_folder, \"lib\", \"tclConfig.sh\")\n package_path = self.package_folder\n build_folder = self.build_folder\n if self.settings.os == \"Windows\" and self.settings.compiler != \"Visual Studio\":\n package_path = package_path.replace(\"\\\\\", \"/\")\n drive, path = os.path.splitdrive(self.build_folder)\n build_folder = \"\".join([drive, path.lower().replace(\"\\\\\", \"/\")])\n\n tools.replace_in_file(tclConfigShPath,\n package_path,\n \"${TCL_ROOT}\")\n tools.replace_in_file(tclConfigShPath,\n build_folder,\n \"${TCL_BUILD_ROOT}\")\n\n 
tools.replace_in_file(tclConfigShPath,\n \"\\nTCL_BUILD_\",\n \"\\n#TCL_BUILD_\")\n tools.replace_in_file(tclConfigShPath,\n \"\\nTCL_SRC_DIR\",\n \"\\n#TCL_SRC_DIR\")\n\n def package_info(self):\n libs = []\n systemlibs = []\n libdirs = []\n for root, _, _ in os.walk(os.path.join(self.package_folder, \"lib\"), topdown=False):\n newlibs = tools.collect_libs(self, root)\n if newlibs:\n libs.extend(newlibs)\n libdirs.append(root)\n if self.settings.os == \"Windows\":\n systemlibs.extend([\"ws2_32\", \"netapi32\", \"userenv\"])\n else:\n systemlibs.extend([\"m\", \"pthread\", \"dl\"])\n\n defines = []\n if not self.options.shared:\n defines.append(\"STATIC_BUILD\")\n self.cpp_info.defines = defines\n\n self.cpp_info.libdirs = libdirs\n self.cpp_info.libs = libs\n self.cpp_info.system_libs = systemlibs\n self.cpp_info.names[\"cmake_find_package\"] = \"TCL\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"TCL\"\n\n if self.settings.os == \"Macos\":\n self.cpp_info.frameworks = [\"Cocoa\"]\n self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags\n\n tcl_library = os.path.join(self.package_folder, \"lib\", \"{}{}\".format(self.name, \".\".join(self.version.split(\".\")[:2])))\n self.output.info(\"Setting TCL_LIBRARY environment variable to {}\".format(tcl_library))\n self.env_info.TCL_LIBRARY = tcl_library\n\n tcl_root = self.package_folder\n self.output.info(\"Setting TCL_ROOT environment variable to {}\".format(tcl_root))\n self.env_info.TCL_ROOT = tcl_root\n\n tclsh_list = list(filter(lambda fn: fn.startswith(\"tclsh\"), os.listdir(os.path.join(self.package_folder, \"bin\"))))\n tclsh = os.path.join(self.package_folder, \"bin\", tclsh_list[0])\n self.output.info(\"Setting TCLSH environment variable to {}\".format(tclsh))\n self.env_info.TCLSH = tclsh\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Adding PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/tcl/8.6.10/conanfile.py"}]}
| 4,073 | 185 |
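The one-line patch above switches the declared macOS framework from Cocoa to CoreFoundation, matching the `otool -L` output quoted in the issue. A toy stand-in (not the real Conan `cpp_info` API, just an illustration of the intended behaviour):

```python
# Hypothetical helper, for illustration only: the framework list the recipe
# should declare per OS after the patch.
def tcl_frameworks(os_name: str) -> list:
    if os_name == "Macos":
        return ["CoreFoundation"]  # was ["Cocoa"] before the patch
    return []


print(tcl_frameworks("Macos"))  # ['CoreFoundation']
print(tcl_frameworks("Linux"))  # []
```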
gh_patches_debug_36455
|
rasdani/github-patches
|
git_diff
|
pyg-team__pytorch_geometric-8566
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Roadmap] PyG for Recommendation 🚀
### 🚀 The feature, motivation and pitch
**This roadmap aims to bring better support for recommendation tasks to PyG.**
Currently, all/most of our link prediction models are trained and evaluated using binary classification metrics. However, this usually requires that we have a set of candidates in advance, from which we can then infer the existence of links. This is not necessarily practical, since in most cases, we want to find the top-k most likely links from the full set of `O(N^2)` pairs.
While training can still be done via negative sampling and binary classification, this roadmap revolves around bringing better support for link prediction evaluation into PyG, with the following end-to-end pipeline:
1. Embed all source and destination nodes
1. Use "Maximum Inner Product Search" (MIPS) to find the top-k most likely links (via [`MIPSKNNIndex`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.MIPSKNNIndex.html#torch_geometric.nn.pool.MIPSKNNIndex))
1. Evaluate using common metrics for recommendation, e.g., `map@k`, `precision@k`, `recall@k`, `f1@k`, `ndcg@k`.
### Metrics
We need to support recommendation metrics, which can be updated and computed in a mini-batch fashion. A related issue can be found [here](https://github.com/pyg-team/pytorch_geometric/issues/8271). Its interface can/should follow the `torchmetrics.Metric` interface, *e.g.*:
```python
class LinkPredMetric(torchmetrics.Metric):
def __init__(self, k: int):
pass
def update(self, top_k_pred_mat: Tensor, edge_label_index: Tensor):
pass
def compute(self):
pass
```
where `top_k_pred_mat` holds the top-k indices for each left-hand-side (LHS) entity, and `edge_label_index` holds the ground-truth information as a `[2, num_targets]` matrix.
* [x] Implement `LinkPredMetric` interface
* [x] Implement `map@k`
* [x] Implement `precision@k`
* [x] Implement `recall@k`
* [x] Implement `f1@k`
* [x] Implement `ndcg@k` (#8326)
### Examples
With this, we can build one or more clear and descriptive examples of how to leverage PyG for recommendation.
* [x] Select and implement one or two datasets commonly used for recommendation
* [x] Add exclusion logic to `MIPSKNNIndex`
* [x] Build an example that implements this pipeline
* [ ] Write a tutorial about recommendation in PyG
* [ ] Advanced: Combine PyG's recommendation capabilities with its temporal GNN support (see #3230)
</issue>
<code>
[start of torch_geometric/nn/metrics.py]
1 from abc import ABC, abstractmethod
2 from typing import Optional, Tuple, Union
3
4 import torch
5 from torch import Tensor
6
7 from torch_geometric.utils import cumsum, scatter
8
9 try:
10 import torchmetrics # noqa
11 WITH_TORCHMETRICS = True
12 BaseMetric = torchmetrics.Metric
13 except Exception:
14 WITH_TORCHMETRICS = False
15 BaseMetric = torch.nn.Module
16
17
18 class LinkPredMetric(BaseMetric, ABC):
19 r"""An abstract class for computing link prediction retrieval metrics.
20
21 Args:
22 k (int): The number of top-:math:`k` predictions to evaluate
23 against.
24 """
25 is_differentiable: Optional[bool] = None
26 higher_is_better: Optional[bool] = None
27 full_state_update: Optional[bool] = None
28
29 def __init__(self, k: int):
30 super().__init__()
31
32 if k <= 0:
33 raise ValueError(f"'k' needs to be a positive integer in "
34 f"'{self.__class__.__name__}' (got {k})")
35
36 self.k = k
37
38 if WITH_TORCHMETRICS:
39 self.add_state('accum', torch.tensor(0.), dist_reduce_fx='sum')
40 self.add_state('total', torch.tensor(0), dist_reduce_fx='sum')
41 else:
42 self.register_buffer('accum', torch.tensor(0.))
43 self.register_buffer('total', torch.tensor(0))
44
45 def update(
46 self,
47 pred_index_mat: Tensor,
48 edge_label_index: Union[Tensor, Tuple[Tensor, Tensor]],
49 ):
50 r"""Updates the state variables based on the current mini-batch
51 prediction.
52
53 :meth:`update` can be repeated multiple times to accumulate the results
54 of successive predictions, *e.g.*, inside a mini-batch training or
55 evaluation loop.
56
57 Args:
58 pred_index_mat (torch.Tensor): The top-:math:`k` predictions of
59 every example in the mini-batch with shape
60 :obj:`[batch_size, k]`.
61 edge_label_index (torch.Tensor): The ground-truth indices for every
62 example in the mini-batch, given in COO format of shape
63 :obj:`[2, num_ground_truth_indices]`.
64 """
65 if pred_index_mat.size(1) != self.k:
66 raise ValueError(f"Expected 'pred_index_mat' to hold {self.k} "
67 f"many indices for every entry "
68 f"(got {pred_index_mat.size(1)})")
69
70 # Compute a boolean matrix indicating if the k-th prediction is part of
71 # the ground-truth. We do this by flattening both prediction and
72 # target indices, and then determining overlaps via `torch.isin`.
73 max_index = max(
74 pred_index_mat.max() if pred_index_mat.numel() > 0 else 0,
75 edge_label_index[1].max()
76 if edge_label_index[1].numel() > 0 else 0,
77 ) + 1
78 arange = torch.arange(
79 start=0,
80 end=max_index * pred_index_mat.size(0),
81 step=max_index,
82 device=pred_index_mat.device,
83 ).view(-1, 1)
84 flat_pred_index = (pred_index_mat + arange).view(-1)
85 flat_y_index = max_index * edge_label_index[0] + edge_label_index[1]
86
87 pred_isin_mat = torch.isin(flat_pred_index, flat_y_index)
88 pred_isin_mat = pred_isin_mat.view(pred_index_mat.size())
89
90 # Compute the number of targets per example:
91 y_count = scatter(
92 torch.ones_like(edge_label_index[0]),
93 edge_label_index[0],
94 dim=0,
95 dim_size=pred_index_mat.size(0),
96 reduce='sum',
97 )
98
99 metric = self._compute(pred_isin_mat, y_count)
100
101 self.accum += metric.sum()
102 self.total += (y_count > 0).sum()
103
104 def compute(self) -> Tensor:
105 r"""Computes the final metric value."""
106 if self.total == 0:
107 return torch.zeros_like(self.accum)
108 return self.accum / self.total
109
110 def reset(self) -> 'LinkPredMetric':
111 r"""Reset metric state variables to their default value."""
112 if WITH_TORCHMETRICS:
113 super().reset()
114 else:
115 self.accum.zero_()
116 self.total.zero_()
117
118 return self
119
120 @abstractmethod
121 def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:
122 r"""Compute the specific metric.
123 To be implemented separately for each metric class.
124
125 Args:
126 pred_isin_mat (torch.Tensor): A boolean matrix whose :obj:`(i,k)`
127 element indicates if the :obj:`k`-th prediction for the
128 :obj:`i`-th example is correct or not.
129 y_count (torch.Tensor): A vector indicating the number of
130 ground-truth labels for each example.
131 """
132 raise NotImplementedError
133
134 def __repr__(self) -> str:
135 return f'{self.__class__.__name__}({self.k})'
136
137
138 class LinkPredPrecision(LinkPredMetric):
139 r"""A link prediction metric to compute Precision@:math`k`.
140
141 Args:
142 k (int): The number of top-:math:`k` predictions to evaluate
143 against.
144 """
145 is_differentiable: bool = False
146 higher_is_better: bool = True
147 full_state_update: bool = False
148
149 def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:
150 return pred_isin_mat.sum(dim=-1) / self.k
151
152
153 class LinkPredNDCG(LinkPredMetric):
154 r"""A link prediction metric to compute the Normalized Discounted
155 Cumulative Gain (NDCG).
156
157 Args:
158 k (int): The number of top-:math:`k` predictions to evaluate
159 against.
160 """
161 is_differentiable: bool = False
162 higher_is_better: bool = True
163 full_state_update: bool = False
164
165 def __init__(self, k: int):
166 super().__init__(k=k)
167
168 dtype = torch.get_default_dtype()
169 multiplier = 1.0 / torch.arange(2, k + 2, dtype=dtype).log2()
170
171 self.register_buffer('multiplier', multiplier)
172 self.register_buffer('idcg', cumsum(multiplier))
173
174 def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:
175 dcg = (pred_isin_mat * self.multiplier.view(1, -1)).sum(dim=-1)
176 idcg = self.idcg[y_count.clamp(max=self.k)]
177
178 out = dcg / idcg
179 out[out.isnan() | out.isinf()] = 0.0
180 return out
181
[end of torch_geometric/nn/metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch_geometric/nn/metrics.py b/torch_geometric/nn/metrics.py
--- a/torch_geometric/nn/metrics.py
+++ b/torch_geometric/nn/metrics.py
@@ -19,12 +19,11 @@
r"""An abstract class for computing link prediction retrieval metrics.
Args:
- k (int): The number of top-:math:`k` predictions to evaluate
- against.
+ k (int): The number of top-:math:`k` predictions to evaluate against.
"""
- is_differentiable: Optional[bool] = None
+ is_differentiable: bool = False
+ full_state_update: bool = False
higher_is_better: Optional[bool] = None
- full_state_update: Optional[bool] = None
def __init__(self, k: int):
super().__init__()
@@ -132,35 +131,44 @@
raise NotImplementedError
def __repr__(self) -> str:
- return f'{self.__class__.__name__}({self.k})'
+ return f'{self.__class__.__name__}(k={self.k})'
class LinkPredPrecision(LinkPredMetric):
r"""A link prediction metric to compute Precision@:math`k`.
Args:
- k (int): The number of top-:math:`k` predictions to evaluate
- against.
+ k (int): The number of top-:math:`k` predictions to evaluate against.
"""
- is_differentiable: bool = False
higher_is_better: bool = True
- full_state_update: bool = False
def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:
return pred_isin_mat.sum(dim=-1) / self.k
+class LinkPredRecall(LinkPredMetric):
+ r"""A link prediction metric to compute Recall@:math:`k`.
+
+ Args:
+ k (int): The number of top-:math:`k` predictions to evaluate against.
+ """
+ higher_is_better: bool = True
+
+ def __init__(self, k: int):
+ super().__init__(k)
+
+ def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:
+ return pred_isin_mat.sum(dim=1) / y_count.clamp(min=1e-7)
+
+
class LinkPredNDCG(LinkPredMetric):
r"""A link prediction metric to compute the Normalized Discounted
Cumulative Gain (NDCG).
Args:
- k (int): The number of top-:math:`k` predictions to evaluate
- against.
+ k (int): The number of top-:math:`k` predictions to evaluate against.
"""
- is_differentiable: bool = False
higher_is_better: bool = True
- full_state_update: bool = False
def __init__(self, k: int):
super().__init__(k=k)
|
{"golden_diff": "diff --git a/torch_geometric/nn/metrics.py b/torch_geometric/nn/metrics.py\n--- a/torch_geometric/nn/metrics.py\n+++ b/torch_geometric/nn/metrics.py\n@@ -19,12 +19,11 @@\n r\"\"\"An abstract class for computing link prediction retrieval metrics.\n \n Args:\n- k (int): The number of top-:math:`k` predictions to evaluate\n- against.\n+ k (int): The number of top-:math:`k` predictions to evaluate against.\n \"\"\"\n- is_differentiable: Optional[bool] = None\n+ is_differentiable: bool = False\n+ full_state_update: bool = False\n higher_is_better: Optional[bool] = None\n- full_state_update: Optional[bool] = None\n \n def __init__(self, k: int):\n super().__init__()\n@@ -132,35 +131,44 @@\n raise NotImplementedError\n \n def __repr__(self) -> str:\n- return f'{self.__class__.__name__}({self.k})'\n+ return f'{self.__class__.__name__}(k={self.k})'\n \n \n class LinkPredPrecision(LinkPredMetric):\n r\"\"\"A link prediction metric to compute Precision@:math`k`.\n \n Args:\n- k (int): The number of top-:math:`k` predictions to evaluate\n- against.\n+ k (int): The number of top-:math:`k` predictions to evaluate against.\n \"\"\"\n- is_differentiable: bool = False\n higher_is_better: bool = True\n- full_state_update: bool = False\n \n def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:\n return pred_isin_mat.sum(dim=-1) / self.k\n \n \n+class LinkPredRecall(LinkPredMetric):\n+ r\"\"\"A link prediction metric to compute Recall@:math:`k`.\n+\n+ Args:\n+ k (int): The number of top-:math:`k` predictions to evaluate against.\n+ \"\"\"\n+ higher_is_better: bool = True\n+\n+ def __init__(self, k: int):\n+ super().__init__(k)\n+\n+ def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:\n+ return pred_isin_mat.sum(dim=1) / y_count.clamp(min=1e-7)\n+\n+\n class LinkPredNDCG(LinkPredMetric):\n r\"\"\"A link prediction metric to compute the Normalized Discounted\n Cumulative Gain (NDCG).\n \n Args:\n- k (int): The number of top-:math:`k` predictions to evaluate\n- against.\n+ k (int): The number of top-:math:`k` predictions to evaluate against.\n \"\"\"\n- is_differentiable: bool = False\n higher_is_better: bool = True\n- full_state_update: bool = False\n \n def __init__(self, k: int):\n super().__init__(k=k)\n", "issue": "[Roadmap] PyG for Recommendation \ud83d\ude80\n### \ud83d\ude80 The feature, motivation and pitch\r\n\r\n**This roadmap aims to bring better support for recommendation tasks to PyG.**\r\n\r\nCurrently, all/most of our link prediction models are trained and evaluated using binary classification metrics. However, this usually requires that we have a set of candidates in advance, from which we can then infer the existence of links. This is not necessarily practical, since in most cases, we want to find the top-k most likely links from the full set of `O(N^2)` pairs.\r\n\r\nWhile training can still be done via negative sampling and binary classification, this roadmap resolves around bringing better support for link prediction evaluation into PyG, with the following end-to-end pipeline:\r\n1. Embed all source and destination nodes\r\n1. Use \"Maximum Inner Product Search\" (MIPS) to find the top-k most likely links (via [`MIPSKNNIndex`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.MIPSKNNIndex.html#torch_geometric.nn.pool.MIPSKNNIndex))\r\n1. 
Evaluate using common metrics for recommendation, e.g., `map@k`, `precision@k`, `recall@k`, `f1@k`, `ndcg@k`.\r\n\r\n### Metrics\r\n\r\nWe need to support recommendation metrics, which can be updated and computed in a mini-batch fashion. A related issue can be found [here](https://github.com/pyg-team/pytorch_geometric/issues/8271). Its interface can/should follow the `torchmetrics.Metric` interface, *e.g.*:\r\n```python\r\nclass LinkPredMetric(torchmetrics.Metric):\r\n def __init__(self, k: int):\r\n pass\r\n\r\n def update(self, top_k_pred_mat: Tensor, edge_label_index: Tensor):\r\n pass\r\n\r\n def compute(self):\r\n pass\r\n```\r\nwhere `top_k_pred_mat` holds the top-k indices for each left-hand-side (LHS) entity, and `edge_label_index` holds the ground-truth information as a `[2, num_targets]` matrix.\r\n\r\n* [x] Implement `LinkPredMetric` interface\r\n* [x] Implement `map@k`\r\n* [x] Implement `precision@k`\r\n* [x] Implement `recall@k`\r\n* [x] Implement `f1@k`\r\n* [x] Implement `ndcg@k` (#8326)\r\n\r\n### Examples\r\n\r\nWith this, we can build one or more clear and descriptive examples of how to leverage PyG for recommendation.\r\n\r\n* [x] Select and implement one or two datasets commonly used for recommendation\r\n* [x] Add exclusion logic to `MIPSKNNIndex`\r\n* [x] Build an example that implements this pipeline\r\n* [ ] Write a tutorial about recommendation in PyG\r\n* [ ] Advanced: Combine PyG's recommendation capabilities with its temporal GNN support (see #3230)\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.utils import cumsum, scatter\n\ntry:\n import torchmetrics # noqa\n WITH_TORCHMETRICS = True\n BaseMetric = torchmetrics.Metric\nexcept Exception:\n WITH_TORCHMETRICS = False\n BaseMetric = torch.nn.Module\n\n\nclass LinkPredMetric(BaseMetric, ABC):\n r\"\"\"An abstract class for computing link prediction retrieval metrics.\n\n Args:\n k (int): The number of top-:math:`k` predictions to evaluate\n against.\n \"\"\"\n is_differentiable: Optional[bool] = None\n higher_is_better: Optional[bool] = None\n full_state_update: Optional[bool] = None\n\n def __init__(self, k: int):\n super().__init__()\n\n if k <= 0:\n raise ValueError(f\"'k' needs to be a positive integer in \"\n f\"'{self.__class__.__name__}' (got {k})\")\n\n self.k = k\n\n if WITH_TORCHMETRICS:\n self.add_state('accum', torch.tensor(0.), dist_reduce_fx='sum')\n self.add_state('total', torch.tensor(0), dist_reduce_fx='sum')\n else:\n self.register_buffer('accum', torch.tensor(0.))\n self.register_buffer('total', torch.tensor(0))\n\n def update(\n self,\n pred_index_mat: Tensor,\n edge_label_index: Union[Tensor, Tuple[Tensor, Tensor]],\n ):\n r\"\"\"Updates the state variables based on the current mini-batch\n prediction.\n\n :meth:`update` can be repeated multiple times to accumulate the results\n of successive predictions, *e.g.*, inside a mini-batch training or\n evaluation loop.\n\n Args:\n pred_index_mat (torch.Tensor): The top-:math:`k` predictions of\n every example in the mini-batch with shape\n :obj:`[batch_size, k]`.\n edge_label_index (torch.Tensor): The ground-truth indices for every\n example in the mini-batch, given in COO format of shape\n :obj:`[2, num_ground_truth_indices]`.\n \"\"\"\n if pred_index_mat.size(1) != self.k:\n raise ValueError(f\"Expected 'pred_index_mat' to hold {self.k} \"\n f\"many indices for every entry \"\n f\"(got 
{pred_index_mat.size(1)})\")\n\n # Compute a boolean matrix indicating if the k-th prediction is part of\n # the ground-truth. We do this by flattening both prediction and\n # target indices, and then determining overlaps via `torch.isin`.\n max_index = max(\n pred_index_mat.max() if pred_index_mat.numel() > 0 else 0,\n edge_label_index[1].max()\n if edge_label_index[1].numel() > 0 else 0,\n ) + 1\n arange = torch.arange(\n start=0,\n end=max_index * pred_index_mat.size(0),\n step=max_index,\n device=pred_index_mat.device,\n ).view(-1, 1)\n flat_pred_index = (pred_index_mat + arange).view(-1)\n flat_y_index = max_index * edge_label_index[0] + edge_label_index[1]\n\n pred_isin_mat = torch.isin(flat_pred_index, flat_y_index)\n pred_isin_mat = pred_isin_mat.view(pred_index_mat.size())\n\n # Compute the number of targets per example:\n y_count = scatter(\n torch.ones_like(edge_label_index[0]),\n edge_label_index[0],\n dim=0,\n dim_size=pred_index_mat.size(0),\n reduce='sum',\n )\n\n metric = self._compute(pred_isin_mat, y_count)\n\n self.accum += metric.sum()\n self.total += (y_count > 0).sum()\n\n def compute(self) -> Tensor:\n r\"\"\"Computes the final metric value.\"\"\"\n if self.total == 0:\n return torch.zeros_like(self.accum)\n return self.accum / self.total\n\n def reset(self) -> 'LinkPredMetric':\n r\"\"\"Reset metric state variables to their default value.\"\"\"\n if WITH_TORCHMETRICS:\n super().reset()\n else:\n self.accum.zero_()\n self.total.zero_()\n\n return self\n\n @abstractmethod\n def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:\n r\"\"\"Compute the specific metric.\n To be implemented separately for each metric class.\n\n Args:\n pred_isin_mat (torch.Tensor): A boolean matrix whose :obj:`(i,k)`\n element indicates if the :obj:`k`-th prediction for the\n :obj:`i`-th example is correct or not.\n y_count (torch.Tensor): A vector indicating the number of\n ground-truth labels for each example.\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}({self.k})'\n\n\nclass LinkPredPrecision(LinkPredMetric):\n r\"\"\"A link prediction metric to compute Precision@:math`k`.\n\n Args:\n k (int): The number of top-:math:`k` predictions to evaluate\n against.\n \"\"\"\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = False\n\n def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:\n return pred_isin_mat.sum(dim=-1) / self.k\n\n\nclass LinkPredNDCG(LinkPredMetric):\n r\"\"\"A link prediction metric to compute the Normalized Discounted\n Cumulative Gain (NDCG).\n\n Args:\n k (int): The number of top-:math:`k` predictions to evaluate\n against.\n \"\"\"\n is_differentiable: bool = False\n higher_is_better: bool = True\n full_state_update: bool = False\n\n def __init__(self, k: int):\n super().__init__(k=k)\n\n dtype = torch.get_default_dtype()\n multiplier = 1.0 / torch.arange(2, k + 2, dtype=dtype).log2()\n\n self.register_buffer('multiplier', multiplier)\n self.register_buffer('idcg', cumsum(multiplier))\n\n def _compute(self, pred_isin_mat: Tensor, y_count: Tensor) -> Tensor:\n dcg = (pred_isin_mat * self.multiplier.view(1, -1)).sum(dim=-1)\n idcg = self.idcg[y_count.clamp(max=self.k)]\n\n out = dcg / idcg\n out[out.isnan() | out.isinf()] = 0.0\n return out\n", "path": "torch_geometric/nn/metrics.py"}]}
num_tokens_prompt: 3,104
num_tokens_diff: 680
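The record above describes a `LinkPredMetric` interface that accumulates results across mini-batches via `update()`/`compute()`, with Precision@k as one concrete metric. The sketch below is an editorial illustration of that accumulation pattern in plain PyTorch; the class name, shapes, and per-example loop are assumptions made for clarity, not PyG's actual implementation.

```python
import torch
from torch import Tensor


class SimplePrecisionAtK:
    """Accumulates Precision@k over mini-batches of top-k predictions."""

    def __init__(self, k: int):
        self.k = k
        self.accum = 0.0  # running sum of per-example precision values
        self.total = 0    # number of examples with at least one ground-truth edge

    def update(self, top_k_pred_mat: Tensor, edge_label_index: Tensor) -> None:
        # top_k_pred_mat: [batch_size, k] predicted right-hand-side indices per LHS entity
        # edge_label_index: [2, num_targets] ground-truth (lhs, rhs) pairs in COO format
        for lhs in range(top_k_pred_mat.size(0)):
            truth = edge_label_index[1, edge_label_index[0] == lhs]
            if truth.numel() == 0:
                continue  # skip entities without any ground-truth edges
            hits = torch.isin(top_k_pred_mat[lhs], truth).sum().item()
            self.accum += hits / self.k
            self.total += 1

    def compute(self) -> float:
        return self.accum / self.total if self.total > 0 else 0.0


metric = SimplePrecisionAtK(k=2)
metric.update(torch.tensor([[0, 1], [2, 3]]),   # top-2 predictions for two LHS entities
              torch.tensor([[0, 1], [1, 3]]))   # ground truth: pairs (0, 1) and (1, 3)
print(metric.compute())  # 0.5: each entity retrieves one of its targets in its top 2
```

A vectorized version would replace the loop with scatter/isin over the whole batch, which is the split the record's `update`/`_compute` design points at; the loop here just keeps the bookkeeping explicit.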
problem_id: gh_patches_debug_179
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-764
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cuda.cupy.clip errors
If I runt he code
`cuda.cupy.clip(cuda.cupy.arange(10), 2, 7)`
I get the following error
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-e529e5fea46e> in <module>()
----> 1 cuda.cupy.clip(cuda.cupy.arange(10), 2, 7)
/usr/local/lib/python2.7/dist-packages/cupy/math/misc.pyc in clip(a, a_min, a_max, out)
24 '''
25 # TODO(okuta): check type
---> 26 return a(a_min, a_max, out=out)
27
28
TypeError: 'cupy.core.core.ndarray' object is not callable
```
Expected output via numpy code `np.clip(np.arange(10), 2, 7)` is `array([2, 2, 2, 3, 4, 5, 6, 7, 7, 7])`
</issue>
<code>
[start of cupy/math/misc.py]
1 from cupy import core
2
3
4 # TODO(okuta): Implement convolve
5
6
7 def clip(a, a_min, a_max, out=None):
8 '''Clips the values of an array to a given interval.
9
10 This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
11 function is more efficient.
12
13 Args:
14 a (cupy.ndarray): The source array.
15 a_min (scalar or cupy.ndarray): The left side of the interval.
16 a_max (scalar or cupy.ndarray): The right side of the interval.
17 out (cupy.ndarray): Output array.
18
19 Returns:
20 cupy.ndarray: Clipped array.
21
22 .. seealso:: :func:`numpy.clip`
23
24 '''
25 # TODO(okuta): check type
26 return a(a_min, a_max, out=out)
27
28
29 sqrt = core.create_ufunc(
30 'cupy_sqrt',
31 # I think this order is a bug of NumPy, though we select this "buggy"
32 # behavior for compatibility with NumPy.
33 ('f->f', 'd->d', 'e->e'),
34 'out0 = sqrt(in0)',
35 doc='''Elementwise positive square-root function.
36
37 .. note::
38 This ufunc outputs float32 arrays for float16 arrays input by default as
39 well as NumPy 1.9. If you want to override this behavior, specify the
40 dtype argument explicitly, or use ``cupy.math.misc.sqrt_fixed`` instead.
41
42 .. seealso:: :data:`numpy.sqrt`
43
44 ''')
45
46
47 sqrt_fixed = core.sqrt_fixed
48
49
50 square = core.create_ufunc(
51 'cupy_square',
52 ('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
53 'Q->Q', 'e->e', 'f->f', 'd->d'),
54 'out0 = in0 * in0',
55 doc='''Elementwise square function.
56
57 .. seealso:: :data:`numpy.square`
58
59 ''')
60
61
62 absolute = core.absolute
63
64
65 # TODO(beam2d): Implement it
66 # fabs
67
68
69 _unsigned_sign = 'out0 = in0 > 0'
70 sign = core.create_ufunc(
71 'cupy_sign',
72 ('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
73 'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
74 'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d'),
75 'out0 = (in0 > 0) - (in0 < 0)',
76 doc='''Elementwise sign function.
77
78 It returns -1, 0, or 1 depending on the sign of the input.
79
80 .. seealso:: :data:`numpy.sign`
81
82 ''')
83
84
85 _float_maximum = \
86 'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : max(in0, in1)'
87 maximum = core.create_ufunc(
88 'cupy_maximum',
89 ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
90 'LL->L', 'qq->q', 'QQ->Q',
91 ('ee->e', _float_maximum),
92 ('ff->f', _float_maximum),
93 ('dd->d', _float_maximum)),
94 'out0 = max(in0, in1)',
95 doc='''Takes the maximum of two arrays elementwise.
96
97 If NaN appears, it returns the NaN.
98
99 .. seealso:: :data:`numpy.maximum`
100
101 ''')
102
103
104 _float_minimum = \
105 'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : min(in0, in1)'
106 minimum = core.create_ufunc(
107 'cupy_minimum',
108 ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
109 'LL->L', 'qq->q', 'QQ->Q',
110 ('ee->e', _float_minimum),
111 ('ff->f', _float_minimum),
112 ('dd->d', _float_minimum)),
113 'out0 = min(in0, in1)',
114 doc='''Takes the minimum of two arrays elementwise.
115
116 If NaN appears, it returns the NaN.
117
118 .. seealso:: :data:`numpy.minimum`
119
120 ''')
121
122
123 fmax = core.create_ufunc(
124 'cupy_fmax',
125 ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
126 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
127 'out0 = max(in0, in1)',
128 doc='''Takes the maximum of two arrays elementwise.
129
130 If NaN appears, it returns the other operand.
131
132 .. seealso:: :data:`numpy.fmax`
133
134 ''')
135
136
137 fmin = core.create_ufunc(
138 'cupy_fmin',
139 ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
140 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
141 'out0 = min(in0, in1)',
142 doc='''Takes the minimum of two arrays elementwise.
143
144 If NaN apperas, it returns the other operand.
145
146 .. seealso:: :data:`numpy.fmin`
147
148 ''')
149
150
151 # TODO(okuta): Implement nan_to_num
152
153
154 # TODO(okuta): Implement real_if_close
155
156
157 # TODO(okuta): Implement interp
158
[end of cupy/math/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/cupy/math/misc.py b/cupy/math/misc.py
--- a/cupy/math/misc.py
+++ b/cupy/math/misc.py
@@ -23,7 +23,7 @@
'''
# TODO(okuta): check type
- return a(a_min, a_max, out=out)
+ return a.clip(a_min, a_max, out=out)
sqrt = core.create_ufunc(
verification_info:
{"golden_diff": "diff --git a/cupy/math/misc.py b/cupy/math/misc.py\n--- a/cupy/math/misc.py\n+++ b/cupy/math/misc.py\n@@ -23,7 +23,7 @@\n \n '''\n # TODO(okuta): check type\n- return a(a_min, a_max, out=out)\n+ return a.clip(a_min, a_max, out=out)\n \n \n sqrt = core.create_ufunc(\n", "issue": "cuda.cupy.clip errors \nIf I runt he code \n\n`cuda.cupy.clip(cuda.cupy.arange(10), 2, 7)`\n\nI get the following error\n\n```\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-7-e529e5fea46e> in <module>()\n----> 1 cuda.cupy.clip(cuda.cupy.arange(10), 2, 7)\n\n/usr/local/lib/python2.7/dist-packages/cupy/math/misc.pyc in clip(a, a_min, a_max, out)\n 24 '''\n 25 # TODO(okuta): check type\n---> 26 return a(a_min, a_max, out=out)\n 27 \n 28 \n\nTypeError: 'cupy.core.core.ndarray' object is not callable\n```\n\nExpected output via numpy code `np.clip(np.arange(10), 2, 7)` is `array([2, 2, 2, 3, 4, 5, 6, 7, 7, 7])`\n\n", "before_files": [{"content": "from cupy import core\n\n\n# TODO(okuta): Implement convolve\n\n\ndef clip(a, a_min, a_max, out=None):\n '''Clips the values of an array to a given interval.\n\n This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this\n function is more efficient.\n\n Args:\n a (cupy.ndarray): The source array.\n a_min (scalar or cupy.ndarray): The left side of the interval.\n a_max (scalar or cupy.ndarray): The right side of the interval.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Clipped array.\n\n .. seealso:: :func:`numpy.clip`\n\n '''\n # TODO(okuta): check type\n return a(a_min, a_max, out=out)\n\n\nsqrt = core.create_ufunc(\n 'cupy_sqrt',\n # I think this order is a bug of NumPy, though we select this \"buggy\"\n # behavior for compatibility with NumPy.\n ('f->f', 'd->d', 'e->e'),\n 'out0 = sqrt(in0)',\n doc='''Elementwise positive square-root function.\n\n .. note::\n This ufunc outputs float32 arrays for float16 arrays input by default as\n well as NumPy 1.9. If you want to override this behavior, specify the\n dtype argument explicitly, or use ``cupy.math.misc.sqrt_fixed`` instead.\n\n .. seealso:: :data:`numpy.sqrt`\n\n ''')\n\n\nsqrt_fixed = core.sqrt_fixed\n\n\nsquare = core.create_ufunc(\n 'cupy_square',\n ('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',\n 'Q->Q', 'e->e', 'f->f', 'd->d'),\n 'out0 = in0 * in0',\n doc='''Elementwise square function.\n\n .. seealso:: :data:`numpy.square`\n\n ''')\n\n\nabsolute = core.absolute\n\n\n# TODO(beam2d): Implement it\n# fabs\n\n\n_unsigned_sign = 'out0 = in0 > 0'\nsign = core.create_ufunc(\n 'cupy_sign',\n ('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),\n 'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),\n 'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d'),\n 'out0 = (in0 > 0) - (in0 < 0)',\n doc='''Elementwise sign function.\n\n It returns -1, 0, or 1 depending on the sign of the input.\n\n .. seealso:: :data:`numpy.sign`\n\n ''')\n\n\n_float_maximum = \\\n 'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : max(in0, in1)'\nmaximum = core.create_ufunc(\n 'cupy_maximum',\n ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',\n 'LL->L', 'qq->q', 'QQ->Q',\n ('ee->e', _float_maximum),\n ('ff->f', _float_maximum),\n ('dd->d', _float_maximum)),\n 'out0 = max(in0, in1)',\n doc='''Takes the maximum of two arrays elementwise.\n\n If NaN appears, it returns the NaN.\n\n .. 
seealso:: :data:`numpy.maximum`\n\n ''')\n\n\n_float_minimum = \\\n 'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : min(in0, in1)'\nminimum = core.create_ufunc(\n 'cupy_minimum',\n ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',\n 'LL->L', 'qq->q', 'QQ->Q',\n ('ee->e', _float_minimum),\n ('ff->f', _float_minimum),\n ('dd->d', _float_minimum)),\n 'out0 = min(in0, in1)',\n doc='''Takes the minimum of two arrays elementwise.\n\n If NaN appears, it returns the NaN.\n\n .. seealso:: :data:`numpy.minimum`\n\n ''')\n\n\nfmax = core.create_ufunc(\n 'cupy_fmax',\n ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',\n 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = max(in0, in1)',\n doc='''Takes the maximum of two arrays elementwise.\n\n If NaN appears, it returns the other operand.\n\n .. seealso:: :data:`numpy.fmax`\n\n ''')\n\n\nfmin = core.create_ufunc(\n 'cupy_fmin',\n ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',\n 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = min(in0, in1)',\n doc='''Takes the minimum of two arrays elementwise.\n\n If NaN apperas, it returns the other operand.\n\n .. seealso:: :data:`numpy.fmin`\n\n ''')\n\n\n# TODO(okuta): Implement nan_to_num\n\n\n# TODO(okuta): Implement real_if_close\n\n\n# TODO(okuta): Implement interp\n", "path": "cupy/math/misc.py"}]}
num_tokens_prompt: 2,520
num_tokens_diff: 92
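The one-line fix in this record swaps the mistaken call `a(a_min, a_max, out=out)` for the ndarray method `a.clip(a_min, a_max, out=out)`. As a quick sanity check of the intended behavior, the snippet below uses NumPy as a stand-in, on the assumption that CuPy mirrors NumPy's `ndarray.clip` semantics; it is not a CuPy/GPU test.

```python
import numpy as np

a = np.arange(10)
# The method form delegated to by the patch agrees with the free function:
assert np.array_equal(a.clip(2, 7), np.clip(a, 2, 7))
print(a.clip(2, 7))  # [2 2 2 3 4 5 6 7 7 7]
```

This reproduces the expected output quoted in the issue, `array([2, 2, 2, 3, 4, 5, 6, 7, 7, 7])`, once the wrapper delegates to the method instead of calling the array object.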
problem_id: gh_patches_debug_42153
source: rasdani/github-patches
task_type: git_diff
in_source_id: lhotse-speech__lhotse-5
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
comment
would be nice to have a comment here mentioning that DummySet contains everything.
https://github.com/pzelasko/lhotse/blob/7555df605def57836c9454ae44aac95c504d86b0/lhotse/audio.py#L77
</issue>
<code>
[start of lhotse/utils.py]
1 from pathlib import Path
2 from typing import Union
3
4 Pathlike = Union[Path, str]
5
6 INT16MAX = 32768
7
8
9 class DummySet:
10 def __contains__(self, item):
11 return True
12
13 def intersection(self, iterable):
14 return True
15
[end of lhotse/utils.py]
[start of lhotse/audio.py]
1 from dataclasses import dataclass, asdict
2 from subprocess import run, PIPE
3 from typing import List, Optional, Dict, Union
4
5 import librosa
6 import numpy as np
7 import yaml
8
9 from lhotse.utils import Pathlike, INT16MAX, DummySet
10
11 Channels = Union[int, List[int]]
12
13
14 @dataclass
15 class AudioSource:
16 """
17 AudioSource represents audio data that can be retrieved from somewhere.
18 Supported sources of audio are currently:
19 - a file (possibly multi-channel)
20 - a command/unix pipe (single-channel only)
21 - a collection of any of the above (see AudioSourceCollection)
22 """
23 type: str
24 channel_ids: List[int]
25 source: str
26
27 def load_audio(
28 self,
29 offset_seconds: float = 0.0,
30 duration_seconds: Optional[float] = None
31 ) -> np.ndarray:
32 assert self.type in ('file', 'command')
33
34 if self.type == 'file':
35 # TODO(pzelasko): make sure that librosa loads multi-channel audio
36 # in the expected format (n_channels, n_samples)
37 return librosa.load(
38 self.source,
39 sr=None, # 'None' uses the native sampling rate
40 offset=offset_seconds,
41 duration=duration_seconds
42 )[0] # discard returned sampling rate
43
44 # TODO(pzelasko): the following naively assumes we're dealing with raw PCM...
45 # not sure if that's how we should do it
46 # also, how should we support chunking for commands?
47 raw_audio = run(self.source, shell=True, stdout=PIPE).stdout
48 int16_audio = np.frombuffer(raw_audio, dtype=np.int16)
49 return int16_audio / INT16MAX
50
51
52 @dataclass
53 class Recording:
54 """
55 Recording represents an AudioSource along with some metadata.
56 """
57 id: str
58 sources: List[AudioSource]
59 sampling_rate: int
60 num_samples: int
61 duration_seconds: float
62
63 def __post_init__(self):
64 self.sources = [AudioSource(**s) if isinstance(s, dict) else s for s in self.sources]
65
66 @property
67 def num_channels(self):
68 return sum(len(source.channel_ids) for source in self.sources)
69
70 def load_audio(
71 self,
72 channels: Optional[Channels] = None,
73 offset_seconds: float = 0.0,
74 duration_seconds: Optional[float] = None
75 ) -> np.ndarray:
76 if channels is None:
77 channels = DummySet()
78 elif isinstance(channels, int):
79 channels = frozenset([channels])
80 else:
81 channels = frozenset(channels)
82
83 samples_per_source = []
84 for source in self.sources:
85 # Case: source not requested
86 if not channels.intersection(source.channel_ids):
87 continue
88 samples = source.load_audio(offset_seconds=offset_seconds, duration_seconds=duration_seconds)
89
90 # Case: two-channel audio file but only one channel requested
91 # it might not be optimal to load all channels, but IDK if there's anything we can do about it
92 channels_to_remove = [
93 idx for idx, cid in enumerate(source.channel_ids)
94 if cid not in channels
95 ]
96 if channels_to_remove:
97 samples = np.delete(samples, channels_to_remove, axis=0)
98 samples_per_source.append(samples)
99
100 # shapes: single-channel (n_samples); multi-channel (n_channels, n_samples)
101 return np.vstack(samples_per_source) if len(samples_per_source) > 1 else samples_per_source[0]
102
103
104 @dataclass
105 class AudioSet:
106 """
107 AudioSet represents a dataset of recordings. It does not contain any annotation -
108 just the information needed to retrieve a recording (possibly multi-channel, from files
109 or from shell commands and pipes) and some metadata for each of them.
110
111 It also supports (de)serialization to/from YAML and takes care of mapping between
112 rich Python classes and YAML primitives during conversion.
113 """
114 recordings: Dict[str, Recording]
115
116 @staticmethod
117 def from_yaml(path: Pathlike) -> 'AudioSet':
118 with open(path) as f:
119 recordings = (Recording(**raw_rec) for raw_rec in yaml.safe_load(f))
120 return AudioSet(recordings={r.id: r for r in recordings})
121
122 def to_yaml(self, path: Pathlike):
123 with open(path, 'w') as f:
124 yaml.safe_dump([asdict(r) for r in self.recordings.values()], stream=f)
125
126 def load_audio(
127 self,
128 recording_id: str,
129 channels: Optional[Channels] = None,
130 offset_seconds: float = 0.0,
131 duration_seconds: Optional[float] = None
132 ) -> np.ndarray:
133 return self.recordings[recording_id].load_audio(
134 channels=channels,
135 offset_seconds=offset_seconds,
136 duration_seconds=duration_seconds
137 )
138
139 def num_channels(self, recording_id: str) -> int:
140 return self.recordings[recording_id].num_channels
141
142 def sampling_rate(self, recording_id: str) -> int:
143 return self.recordings[recording_id].sampling_rate
144
145 def num_samples(self, recording_id: str) -> int:
146 return self.recordings[recording_id].num_samples
147
148 def duration_seconds(self, recording_id: str) -> float:
149 return self.recordings[recording_id].duration_seconds
150
151 def __iter__(self):
152 return iter(self.recordings.values())
153
[end of lhotse/audio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/lhotse/audio.py b/lhotse/audio.py
--- a/lhotse/audio.py
+++ b/lhotse/audio.py
@@ -1,4 +1,6 @@
from dataclasses import dataclass, asdict
+from dataclasses import dataclass, asdict
+from io import BytesIO
from subprocess import run, PIPE
from typing import List, Optional, Dict, Union
@@ -6,7 +8,7 @@
import numpy as np
import yaml
-from lhotse.utils import Pathlike, INT16MAX, DummySet
+from lhotse.utils import Pathlike, SetContainingAnything
Channels = Union[int, List[int]]
@@ -16,9 +18,8 @@
"""
AudioSource represents audio data that can be retrieved from somewhere.
Supported sources of audio are currently:
- - a file (possibly multi-channel)
- - a command/unix pipe (single-channel only)
- - a collection of any of the above (see AudioSourceCollection)
+ - a file (formats supported by librosa, possibly multi-channel)
+ - a command/unix pipe (must be WAVE, possibly multi-channel)
"""
type: str
channel_ids: List[int]
@@ -29,24 +30,31 @@
offset_seconds: float = 0.0,
duration_seconds: Optional[float] = None
) -> np.ndarray:
+ """
+ Load the AudioSource (both files and commands) with librosa,
+ accounting for many audio formats and multi-channel inputs.
+ Returns numpy array with shapes: (n_samples) for single-channel,
+ (n_channels, n_samples) for multi-channel.
+ """
assert self.type in ('file', 'command')
- if self.type == 'file':
- # TODO(pzelasko): make sure that librosa loads multi-channel audio
- # in the expected format (n_channels, n_samples)
- return librosa.load(
- self.source,
- sr=None, # 'None' uses the native sampling rate
- offset=offset_seconds,
- duration=duration_seconds
- )[0] # discard returned sampling rate
+ if self.type == 'command':
+ if offset_seconds != 0.0 or duration_seconds is not None:
+ # TODO(pzelasko): How should we support chunking for commands?
+ # We risk being very inefficient when reading many chunks from the same file
+ # without some caching scheme, because we'll be re-running commands.
+ raise ValueError("Reading audio chunks from command AudioSource type is currently not supported.")
+ source = BytesIO(run(self.source, shell=True, stdout=PIPE).stdout)
+ else:
+ source = self.source
- # TODO(pzelasko): the following naively assumes we're dealing with raw PCM...
- # not sure if that's how we should do it
- # also, how should we support chunking for commands?
- raw_audio = run(self.source, shell=True, stdout=PIPE).stdout
- int16_audio = np.frombuffer(raw_audio, dtype=np.int16)
- return int16_audio / INT16MAX
+ return librosa.load(
+ source,
+ sr=None, # 'None' uses the native sampling rate
+ mono=False, # Retain multi-channel if it's there
+ offset=offset_seconds,
+ duration=duration_seconds
+ )[0] # discard returned sampling rate
@dataclass
@@ -74,7 +82,7 @@
duration_seconds: Optional[float] = None
) -> np.ndarray:
if channels is None:
- channels = DummySet()
+ channels = SetContainingAnything()
elif isinstance(channels, int):
channels = frozenset([channels])
else:
diff --git a/lhotse/utils.py b/lhotse/utils.py
--- a/lhotse/utils.py
+++ b/lhotse/utils.py
@@ -6,7 +6,7 @@
INT16MAX = 32768
-class DummySet:
+class SetContainingAnything:
def __contains__(self, item):
return True
verification_info:
{"golden_diff": "diff --git a/lhotse/audio.py b/lhotse/audio.py\n--- a/lhotse/audio.py\n+++ b/lhotse/audio.py\n@@ -1,4 +1,6 @@\n from dataclasses import dataclass, asdict\n+from dataclasses import dataclass, asdict\n+from io import BytesIO\n from subprocess import run, PIPE\n from typing import List, Optional, Dict, Union\n \n@@ -6,7 +8,7 @@\n import numpy as np\n import yaml\n \n-from lhotse.utils import Pathlike, INT16MAX, DummySet\n+from lhotse.utils import Pathlike, SetContainingAnything\n \n Channels = Union[int, List[int]]\n \n@@ -16,9 +18,8 @@\n \"\"\"\n AudioSource represents audio data that can be retrieved from somewhere.\n Supported sources of audio are currently:\n- - a file (possibly multi-channel)\n- - a command/unix pipe (single-channel only)\n- - a collection of any of the above (see AudioSourceCollection)\n+ - a file (formats supported by librosa, possibly multi-channel)\n+ - a command/unix pipe (must be WAVE, possibly multi-channel)\n \"\"\"\n type: str\n channel_ids: List[int]\n@@ -29,24 +30,31 @@\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None\n ) -> np.ndarray:\n+ \"\"\"\n+ Load the AudioSource (both files and commands) with librosa,\n+ accounting for many audio formats and multi-channel inputs.\n+ Returns numpy array with shapes: (n_samples) for single-channel,\n+ (n_channels, n_samples) for multi-channel.\n+ \"\"\"\n assert self.type in ('file', 'command')\n \n- if self.type == 'file':\n- # TODO(pzelasko): make sure that librosa loads multi-channel audio\n- # in the expected format (n_channels, n_samples)\n- return librosa.load(\n- self.source,\n- sr=None, # 'None' uses the native sampling rate\n- offset=offset_seconds,\n- duration=duration_seconds\n- )[0] # discard returned sampling rate\n+ if self.type == 'command':\n+ if offset_seconds != 0.0 or duration_seconds is not None:\n+ # TODO(pzelasko): How should we support chunking for commands?\n+ # We risk being very inefficient when reading many chunks from the same file\n+ # without some caching scheme, because we'll be re-running commands.\n+ raise ValueError(\"Reading audio chunks from command AudioSource type is currently not supported.\")\n+ source = BytesIO(run(self.source, shell=True, stdout=PIPE).stdout)\n+ else:\n+ source = self.source\n \n- # TODO(pzelasko): the following naively assumes we're dealing with raw PCM...\n- # not sure if that's how we should do it\n- # also, how should we support chunking for commands?\n- raw_audio = run(self.source, shell=True, stdout=PIPE).stdout\n- int16_audio = np.frombuffer(raw_audio, dtype=np.int16)\n- return int16_audio / INT16MAX\n+ return librosa.load(\n+ source,\n+ sr=None, # 'None' uses the native sampling rate\n+ mono=False, # Retain multi-channel if it's there\n+ offset=offset_seconds,\n+ duration=duration_seconds\n+ )[0] # discard returned sampling rate\n \n \n @dataclass\n@@ -74,7 +82,7 @@\n duration_seconds: Optional[float] = None\n ) -> np.ndarray:\n if channels is None:\n- channels = DummySet()\n+ channels = SetContainingAnything()\n elif isinstance(channels, int):\n channels = frozenset([channels])\n else:\ndiff --git a/lhotse/utils.py b/lhotse/utils.py\n--- a/lhotse/utils.py\n+++ b/lhotse/utils.py\n@@ -6,7 +6,7 @@\n INT16MAX = 32768\n \n \n-class DummySet:\n+class SetContainingAnything:\n def __contains__(self, item):\n return True\n", "issue": "comment\nwould be nice to have a comment here mentioning that DummySet contains 
everything.\r\n\r\nhttps://github.com/pzelasko/lhotse/blob/7555df605def57836c9454ae44aac95c504d86b0/lhotse/audio.py#L77\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Union\n\nPathlike = Union[Path, str]\n\nINT16MAX = 32768\n\n\nclass DummySet:\n def __contains__(self, item):\n return True\n\n def intersection(self, iterable):\n return True\n", "path": "lhotse/utils.py"}, {"content": "from dataclasses import dataclass, asdict\nfrom subprocess import run, PIPE\nfrom typing import List, Optional, Dict, Union\n\nimport librosa\nimport numpy as np\nimport yaml\n\nfrom lhotse.utils import Pathlike, INT16MAX, DummySet\n\nChannels = Union[int, List[int]]\n\n\n@dataclass\nclass AudioSource:\n \"\"\"\n AudioSource represents audio data that can be retrieved from somewhere.\n Supported sources of audio are currently:\n - a file (possibly multi-channel)\n - a command/unix pipe (single-channel only)\n - a collection of any of the above (see AudioSourceCollection)\n \"\"\"\n type: str\n channel_ids: List[int]\n source: str\n\n def load_audio(\n self,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None\n ) -> np.ndarray:\n assert self.type in ('file', 'command')\n\n if self.type == 'file':\n # TODO(pzelasko): make sure that librosa loads multi-channel audio\n # in the expected format (n_channels, n_samples)\n return librosa.load(\n self.source,\n sr=None, # 'None' uses the native sampling rate\n offset=offset_seconds,\n duration=duration_seconds\n )[0] # discard returned sampling rate\n\n # TODO(pzelasko): the following naively assumes we're dealing with raw PCM...\n # not sure if that's how we should do it\n # also, how should we support chunking for commands?\n raw_audio = run(self.source, shell=True, stdout=PIPE).stdout\n int16_audio = np.frombuffer(raw_audio, dtype=np.int16)\n return int16_audio / INT16MAX\n\n\n@dataclass\nclass Recording:\n \"\"\"\n Recording represents an AudioSource along with some metadata.\n \"\"\"\n id: str\n sources: List[AudioSource]\n sampling_rate: int\n num_samples: int\n duration_seconds: float\n\n def __post_init__(self):\n self.sources = [AudioSource(**s) if isinstance(s, dict) else s for s in self.sources]\n\n @property\n def num_channels(self):\n return sum(len(source.channel_ids) for source in self.sources)\n\n def load_audio(\n self,\n channels: Optional[Channels] = None,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None\n ) -> np.ndarray:\n if channels is None:\n channels = DummySet()\n elif isinstance(channels, int):\n channels = frozenset([channels])\n else:\n channels = frozenset(channels)\n\n samples_per_source = []\n for source in self.sources:\n # Case: source not requested\n if not channels.intersection(source.channel_ids):\n continue\n samples = source.load_audio(offset_seconds=offset_seconds, duration_seconds=duration_seconds)\n\n # Case: two-channel audio file but only one channel requested\n # it might not be optimal to load all channels, but IDK if there's anything we can do about it\n channels_to_remove = [\n idx for idx, cid in enumerate(source.channel_ids)\n if cid not in channels\n ]\n if channels_to_remove:\n samples = np.delete(samples, channels_to_remove, axis=0)\n samples_per_source.append(samples)\n\n # shapes: single-channel (n_samples); multi-channel (n_channels, n_samples)\n return np.vstack(samples_per_source) if len(samples_per_source) > 1 else samples_per_source[0]\n\n\n@dataclass\nclass AudioSet:\n \"\"\"\n AudioSet represents a dataset of recordings. 
It does not contain any annotation -\n just the information needed to retrieve a recording (possibly multi-channel, from files\n or from shell commands and pipes) and some metadata for each of them.\n\n It also supports (de)serialization to/from YAML and takes care of mapping between\n rich Python classes and YAML primitives during conversion.\n \"\"\"\n recordings: Dict[str, Recording]\n\n @staticmethod\n def from_yaml(path: Pathlike) -> 'AudioSet':\n with open(path) as f:\n recordings = (Recording(**raw_rec) for raw_rec in yaml.safe_load(f))\n return AudioSet(recordings={r.id: r for r in recordings})\n\n def to_yaml(self, path: Pathlike):\n with open(path, 'w') as f:\n yaml.safe_dump([asdict(r) for r in self.recordings.values()], stream=f)\n\n def load_audio(\n self,\n recording_id: str,\n channels: Optional[Channels] = None,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None\n ) -> np.ndarray:\n return self.recordings[recording_id].load_audio(\n channels=channels,\n offset_seconds=offset_seconds,\n duration_seconds=duration_seconds\n )\n\n def num_channels(self, recording_id: str) -> int:\n return self.recordings[recording_id].num_channels\n\n def sampling_rate(self, recording_id: str) -> int:\n return self.recordings[recording_id].sampling_rate\n\n def num_samples(self, recording_id: str) -> int:\n return self.recordings[recording_id].num_samples\n\n def duration_seconds(self, recording_id: str) -> float:\n return self.recordings[recording_id].duration_seconds\n\n def __iter__(self):\n return iter(self.recordings.values())\n", "path": "lhotse/audio.py"}]}
num_tokens_prompt: 2,256
num_tokens_diff: 926
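The rename in this record (`DummySet` to `SetContainingAnything`) is about making the "contains everything" trick self-documenting: when the caller does not restrict channels, a universal set lets every channel id pass the membership and intersection checks. A minimal, hypothetical sketch of the pattern, not lhotse's actual class:

```python
class SetContainingAnything:
    """A set-like object that reports membership for every item."""

    def __contains__(self, item):
        return True

    def intersection(self, iterable):
        return True  # any collection "overlaps" with the universal set


channels = SetContainingAnything()
assert 0 in channels and "anything" in channels
assert channels.intersection([3, 5])  # truthy, so no audio source gets skipped
```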